From 10c62304b6dfa922f15c56db2d357b643e0fd5e8 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 29 Aug 2022 18:23:53 +0300 Subject: [PATCH 001/101] Copy Uniques into Nfts --- frame/nfts/Cargo.toml | 48 + frame/nfts/README.md | 78 ++ frame/nfts/src/benchmarking.rs | 447 ++++++++ frame/nfts/src/functions.rs | 276 +++++ frame/nfts/src/impl_nonfungibles.rs | 188 ++++ frame/nfts/src/lib.rs | 1498 +++++++++++++++++++++++++++ frame/nfts/src/migration.rs | 57 + frame/nfts/src/mock.rs | 114 ++ frame/nfts/src/tests.rs | 872 ++++++++++++++++ frame/nfts/src/types.rs | 129 +++ frame/nfts/src/weights.rs | 510 +++++++++ 11 files changed, 4217 insertions(+) create mode 100644 frame/nfts/Cargo.toml create mode 100644 frame/nfts/README.md create mode 100644 frame/nfts/src/benchmarking.rs create mode 100644 frame/nfts/src/functions.rs create mode 100644 frame/nfts/src/impl_nonfungibles.rs create mode 100644 frame/nfts/src/lib.rs create mode 100644 frame/nfts/src/migration.rs create mode 100644 frame/nfts/src/mock.rs create mode 100644 frame/nfts/src/tests.rs create mode 100644 frame/nfts/src/types.rs create mode 100644 frame/nfts/src/weights.rs diff --git a/frame/nfts/Cargo.toml b/frame/nfts/Cargo.toml new file mode 100644 index 0000000000000..19b0790947f84 --- /dev/null +++ b/frame/nfts/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "pallet-uniques" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME NFT asset management pallet" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +log = { version = "0.4.17", default-features = false } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "4.0.0-dev", 
default-features = false, optional = true, path = "../benchmarking" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } + +[dev-dependencies] +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +sp-core = { version = "6.0.0", path = "../../primitives/core" } +sp-io = { version = "6.0.0", path = "../../primitives/io" } +sp-std = { version = "4.0.0", path = "../../primitives/std" } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/nfts/README.md b/frame/nfts/README.md new file mode 100644 index 0000000000000..8a91a558b5b5f --- /dev/null +++ b/frame/nfts/README.md @@ -0,0 +1,78 @@ +# Uniques Module + +A simple, secure module for dealing with non-fungible assets. + +## Overview + +The Uniques module provides functionality for asset management of non-fungible asset classes, including: + +* Asset Issuance +* Asset Transfer +* Asset Destruction + +To use it in your runtime, you need to implement the assets [`uniques::Config`](https://paritytech.github.io/substrate/master/pallet_uniques/pallet/trait.Config.html). + +The supported dispatchable functions are documented in the [`uniques::Call`](https://paritytech.github.io/substrate/master/pallet_uniques/pallet/enum.Call.html) enum. + +### Terminology + +* **Asset issuance:** The creation of a new asset instance. 
+* **Asset transfer:** The action of transferring an asset instance from one account to another. +* **Asset burning:** The destruction of an asset instance. +* **Non-fungible asset:** An asset for which each unit has unique characteristics. There is exactly + one instance of such an asset in existence and there is exactly one owning account. + +### Goals + +The Uniques pallet in Substrate is designed to make the following possible: + +* Allow accounts to permissionlessly create asset classes (collections of asset instances). +* Allow a named (permissioned) account to mint and burn unique assets within a class. +* Move asset instances between accounts permissionlessly. +* Allow a named (permissioned) account to freeze and unfreeze unique assets within a + class or the entire class. +* Allow the owner of an asset instance to delegate the ability to transfer the asset to some + named third-party. + +## Interface + +### Permissionless dispatchables +* `create`: Create a new asset class by placing a deposit. +* `transfer`: Transfer an asset instance to a new owner. +* `redeposit`: Update the deposit amount of an asset instance, potentially freeing funds. +* `approve_transfer`: Name a delegate who may authorise a transfer. +* `cancel_approval`: Revert the effects of a previous `approve_transfer`. + +### Permissioned dispatchables +* `destroy`: Destroy an asset class. +* `mint`: Mint a new asset instance within an asset class. +* `burn`: Burn an asset instance within an asset class. +* `freeze`: Prevent an individual asset from being transferred. +* `thaw`: Revert the effects of a previous `freeze`. +* `freeze_class`: Prevent all asset within a class from being transferred. +* `thaw_class`: Revert the effects of a previous `freeze_class`. +* `transfer_ownership`: Alter the owner of an asset class, moving all associated deposits. +* `set_team`: Alter the permissioned accounts of an asset class. 
+ +### Metadata (permissioned) dispatchables +* `set_attribute`: Set a metadata attribute of an asset instance or class. +* `clear_attribute`: Remove a metadata attribute of an asset instance or class. +* `set_metadata`: Set general metadata of an asset instance. +* `clear_metadata`: Remove general metadata of an asset instance. +* `set_class_metadata`: Set general metadata of an asset class. +* `clear_class_metadata`: Remove general metadata of an asset class. + +### Force (i.e. governance) dispatchables +* `force_create`: Create a new asset class. +* `force_asset_status`: Alter the underlying characteristics of an asset class. + +Please refer to the [`Call`](https://paritytech.github.io/substrate/master/pallet_uniques/pallet/enum.Call.html) enum +and its associated variants for documentation on each function. + +## Related Modules + +* [`System`](https://docs.rs/frame-system/latest/frame_system/) +* [`Support`](https://docs.rs/frame-support/latest/frame_support/) +* [`Assets`](https://docs.rs/pallet-assets/latest/pallet_assets/) + +License: Apache-2.0 diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs new file mode 100644 index 0000000000000..3e3148b5b5fc2 --- /dev/null +++ b/frame/nfts/src/benchmarking.rs @@ -0,0 +1,447 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Uniques pallet benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use frame_benchmarking::{ + account, benchmarks_instance_pallet, whitelist_account, whitelisted_caller, +}; +use frame_support::{ + dispatch::UnfilteredDispatchable, + traits::{EnsureOrigin, Get}, + BoundedVec, +}; +use frame_system::RawOrigin as SystemOrigin; +use sp_runtime::traits::Bounded; +use sp_std::prelude::*; + +use crate::Pallet as Uniques; + +const SEED: u32 = 0; + +fn create_collection, I: 'static>( +) -> (T::CollectionId, T::AccountId, AccountIdLookupOf) { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let collection = T::Helper::collection(0); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + assert!(Uniques::::force_create( + SystemOrigin::Root.into(), + collection, + caller_lookup.clone(), + false, + ) + .is_ok()); + (collection, caller, caller_lookup) +} + +fn add_collection_metadata, I: 'static>() -> (T::AccountId, AccountIdLookupOf) { + let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; + if caller != whitelisted_caller() { + whitelist_account!(caller); + } + let caller_lookup = T::Lookup::unlookup(caller.clone()); + assert!(Uniques::::set_collection_metadata( + SystemOrigin::Signed(caller.clone()).into(), + T::Helper::collection(0), + vec![0; T::StringLimit::get() as usize].try_into().unwrap(), + false, + ) + .is_ok()); + (caller, caller_lookup) +} + +fn mint_item, I: 'static>( + index: u16, +) -> (T::ItemId, T::AccountId, AccountIdLookupOf) { + let caller = Collection::::get(T::Helper::collection(0)).unwrap().admin; + if caller != whitelisted_caller() { + whitelist_account!(caller); + } + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let item = T::Helper::item(index); + assert!(Uniques::::mint( + SystemOrigin::Signed(caller.clone()).into(), + T::Helper::collection(0), + item, + caller_lookup.clone(), + ) + .is_ok()); + (item, 
caller, caller_lookup) +} + +fn add_item_metadata, I: 'static>( + item: T::ItemId, +) -> (T::AccountId, AccountIdLookupOf) { + let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; + if caller != whitelisted_caller() { + whitelist_account!(caller); + } + let caller_lookup = T::Lookup::unlookup(caller.clone()); + assert!(Uniques::::set_metadata( + SystemOrigin::Signed(caller.clone()).into(), + T::Helper::collection(0), + item, + vec![0; T::StringLimit::get() as usize].try_into().unwrap(), + false, + ) + .is_ok()); + (caller, caller_lookup) +} + +fn add_item_attribute, I: 'static>( + item: T::ItemId, +) -> (BoundedVec, T::AccountId, AccountIdLookupOf) { + let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; + if caller != whitelisted_caller() { + whitelist_account!(caller); + } + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let key: BoundedVec<_, _> = vec![0; T::KeyLimit::get() as usize].try_into().unwrap(); + assert!(Uniques::::set_attribute( + SystemOrigin::Signed(caller.clone()).into(), + T::Helper::collection(0), + Some(item), + key.clone(), + vec![0; T::ValueLimit::get() as usize].try_into().unwrap(), + ) + .is_ok()); + (key, caller, caller_lookup) +} + +fn assert_last_event, I: 'static>(generic_event: >::Event) { + let events = frame_system::Pallet::::events(); + let system_event: ::Event = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +benchmarks_instance_pallet! 
{ + create { + let collection = T::Helper::collection(0); + let origin = T::CreateOrigin::successful_origin(&collection); + let caller = T::CreateOrigin::ensure_origin(origin.clone(), &collection).unwrap(); + whitelist_account!(caller); + let admin = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + let call = Call::::create { collection, admin }; + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert_last_event::(Event::Created { collection: T::Helper::collection(0), creator: caller.clone(), owner: caller }.into()); + } + + force_create { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + }: _(SystemOrigin::Root, T::Helper::collection(0), caller_lookup, true) + verify { + assert_last_event::(Event::ForceCreated { collection: T::Helper::collection(0), owner: caller }.into()); + } + + destroy { + let n in 0 .. 1_000; + let m in 0 .. 1_000; + let a in 0 .. 1_000; + + let (collection, caller, caller_lookup) = create_collection::(); + add_collection_metadata::(); + for i in 0..n { + mint_item::(i as u16); + } + for i in 0..m { + add_item_metadata::(T::Helper::item(i as u16)); + } + for i in 0..a { + add_item_attribute::(T::Helper::item(i as u16)); + } + let witness = Collection::::get(collection).unwrap().destroy_witness(); + }: _(SystemOrigin::Signed(caller), collection, witness) + verify { + assert_last_event::(Event::Destroyed { collection }.into()); + } + + mint { + let (collection, caller, caller_lookup) = create_collection::(); + let item = T::Helper::item(0); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, caller_lookup) + verify { + assert_last_event::(Event::Issued { collection, item, owner: caller }.into()); + } + + burn { + let (collection, caller, caller_lookup) = create_collection::(); + let (item, ..) 
= mint_item::(0); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, Some(caller_lookup)) + verify { + assert_last_event::(Event::Burned { collection, item, owner: caller }.into()); + } + + transfer { + let (collection, caller, caller_lookup) = create_collection::(); + let (item, ..) = mint_item::(0); + + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, target_lookup) + verify { + assert_last_event::(Event::Transferred { collection, item, from: caller, to: target }.into()); + } + + redeposit { + let i in 0 .. 5_000; + let (collection, caller, caller_lookup) = create_collection::(); + let items = (0..i).map(|x| mint_item::(x as u16).0).collect::>(); + Uniques::::force_item_status( + SystemOrigin::Root.into(), + collection, + caller_lookup.clone(), + caller_lookup.clone(), + caller_lookup.clone(), + caller_lookup, + true, + false, + )?; + }: _(SystemOrigin::Signed(caller.clone()), collection, items.clone()) + verify { + assert_last_event::(Event::Redeposited { collection, successful_items: items }.into()); + } + + freeze { + let (collection, caller, caller_lookup) = create_collection::(); + let (item, ..) = mint_item::(0); + }: _(SystemOrigin::Signed(caller.clone()), T::Helper::collection(0), T::Helper::item(0)) + verify { + assert_last_event::(Event::Frozen { collection: T::Helper::collection(0), item: T::Helper::item(0) }.into()); + } + + thaw { + let (collection, caller, caller_lookup) = create_collection::(); + let (item, ..) 
= mint_item::(0); + Uniques::::freeze( + SystemOrigin::Signed(caller.clone()).into(), + collection, + item, + )?; + }: _(SystemOrigin::Signed(caller.clone()), collection, item) + verify { + assert_last_event::(Event::Thawed { collection, item }.into()); + } + + freeze_collection { + let (collection, caller, caller_lookup) = create_collection::(); + }: _(SystemOrigin::Signed(caller.clone()), collection) + verify { + assert_last_event::(Event::CollectionFrozen { collection }.into()); + } + + thaw_collection { + let (collection, caller, caller_lookup) = create_collection::(); + let origin = SystemOrigin::Signed(caller.clone()).into(); + Uniques::::freeze_collection(origin, collection)?; + }: _(SystemOrigin::Signed(caller.clone()), collection) + verify { + assert_last_event::(Event::CollectionThawed { collection }.into()); + } + + transfer_ownership { + let (collection, caller, _) = create_collection::(); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); + let origin = SystemOrigin::Signed(target.clone()).into(); + Uniques::::set_accept_ownership(origin, Some(collection))?; + }: _(SystemOrigin::Signed(caller), collection, target_lookup) + verify { + assert_last_event::(Event::OwnerChanged { collection, new_owner: target }.into()); + } + + set_team { + let (collection, caller, _) = create_collection::(); + let target0 = T::Lookup::unlookup(account("target", 0, SEED)); + let target1 = T::Lookup::unlookup(account("target", 1, SEED)); + let target2 = T::Lookup::unlookup(account("target", 2, SEED)); + }: _(SystemOrigin::Signed(caller), collection, target0, target1, target2) + verify { + assert_last_event::(Event::TeamChanged{ + collection, + issuer: account("target", 0, SEED), + admin: account("target", 1, SEED), + freezer: account("target", 2, SEED), + }.into()); + } + + force_item_status { + let (collection, caller, caller_lookup) 
= create_collection::(); + let origin = T::ForceOrigin::successful_origin(); + let call = Call::::force_item_status { + collection, + owner: caller_lookup.clone(), + issuer: caller_lookup.clone(), + admin: caller_lookup.clone(), + freezer: caller_lookup, + free_holding: true, + is_frozen: false, + }; + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert_last_event::(Event::ItemStatusChanged { collection }.into()); + } + + set_attribute { + let key: BoundedVec<_, _> = vec![0u8; T::KeyLimit::get() as usize].try_into().unwrap(); + let value: BoundedVec<_, _> = vec![0u8; T::ValueLimit::get() as usize].try_into().unwrap(); + + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + add_item_metadata::(item); + }: _(SystemOrigin::Signed(caller), collection, Some(item), key.clone(), value.clone()) + verify { + assert_last_event::(Event::AttributeSet { collection, maybe_item: Some(item), key, value }.into()); + } + + clear_attribute { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + add_item_metadata::(item); + let (key, ..) = add_item_attribute::(item); + }: _(SystemOrigin::Signed(caller), collection, Some(item), key.clone()) + verify { + assert_last_event::(Event::AttributeCleared { collection, maybe_item: Some(item), key }.into()); + } + + set_metadata { + let data: BoundedVec<_, _> = vec![0u8; T::StringLimit::get() as usize].try_into().unwrap(); + + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + }: _(SystemOrigin::Signed(caller), collection, item, data.clone(), false) + verify { + assert_last_event::(Event::MetadataSet { collection, item, data, is_frozen: false }.into()); + } + + clear_metadata { + let (collection, caller, _) = create_collection::(); + let (item, ..) 
= mint_item::(0); + add_item_metadata::(item); + }: _(SystemOrigin::Signed(caller), collection, item) + verify { + assert_last_event::(Event::MetadataCleared { collection, item }.into()); + } + + set_collection_metadata { + let data: BoundedVec<_, _> = vec![0u8; T::StringLimit::get() as usize].try_into().unwrap(); + + let (collection, caller, _) = create_collection::(); + }: _(SystemOrigin::Signed(caller), collection, data.clone(), false) + verify { + assert_last_event::(Event::CollectionMetadataSet { collection, data, is_frozen: false }.into()); + } + + clear_collection_metadata { + let (collection, caller, _) = create_collection::(); + add_collection_metadata::(); + }: _(SystemOrigin::Signed(caller), collection) + verify { + assert_last_event::(Event::CollectionMetadataCleared { collection }.into()); + } + + approve_transfer { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup) + verify { + assert_last_event::(Event::ApprovedTransfer { collection, item, owner: caller, delegate }.into()); + } + + cancel_approval { + let (collection, caller, _) = create_collection::(); + let (item, ..) 
= mint_item::(0); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let origin = SystemOrigin::Signed(caller.clone()).into(); + Uniques::::approve_transfer(origin, collection, item, delegate_lookup.clone())?; + }: _(SystemOrigin::Signed(caller.clone()), collection, item, Some(delegate_lookup)) + verify { + assert_last_event::(Event::ApprovalCancelled { collection, item, owner: caller, delegate }.into()); + } + + set_accept_ownership { + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + let collection = T::Helper::collection(0); + }: _(SystemOrigin::Signed(caller.clone()), Some(collection)) + verify { + assert_last_event::(Event::OwnershipAcceptanceChanged { + who: caller, + maybe_collection: Some(collection), + }.into()); + } + + set_collection_max_supply { + let (collection, caller, _) = create_collection::(); + }: _(SystemOrigin::Signed(caller.clone()), collection, u32::MAX) + verify { + assert_last_event::(Event::CollectionMaxSupplySet { + collection, + max_supply: u32::MAX, + }.into()); + } + + set_price { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let price = ItemPrice::::from(100u32); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, Some(price), Some(delegate_lookup)) + verify { + assert_last_event::(Event::ItemPriceSet { + collection, + item, + price, + whitelisted_buyer: Some(delegate), + }.into()); + } + + buy_item { + let (collection, seller, _) = create_collection::(); + let (item, ..) 
= mint_item::(0); + let buyer: T::AccountId = account("buyer", 0, SEED); + let buyer_lookup = T::Lookup::unlookup(buyer.clone()); + let price = ItemPrice::::from(0u32); + let origin = SystemOrigin::Signed(seller.clone()).into(); + Uniques::::set_price(origin, collection, item, Some(price.clone()), Some(buyer_lookup))?; + T::Currency::make_free_balance_be(&buyer, DepositBalanceOf::::max_value()); + }: _(SystemOrigin::Signed(buyer.clone()), collection, item, price.clone()) + verify { + assert_last_event::(Event::ItemBought { + collection, + item, + price, + seller, + buyer, + }.into()); + } + + impl_benchmark_test_suite!(Uniques, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/nfts/src/functions.rs b/frame/nfts/src/functions.rs new file mode 100644 index 0000000000000..107214558307f --- /dev/null +++ b/frame/nfts/src/functions.rs @@ -0,0 +1,276 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Various pieces of common functionality. 
+ +use super::*; +use frame_support::{ + ensure, + traits::{ExistenceRequirement, Get}, +}; +use sp_runtime::{DispatchError, DispatchResult}; + +impl, I: 'static> Pallet { + pub fn do_transfer( + collection: T::CollectionId, + item: T::ItemId, + dest: T::AccountId, + with_details: impl FnOnce( + &CollectionDetailsFor, + &mut ItemDetailsFor, + ) -> DispatchResult, + ) -> DispatchResult { + let collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + ensure!(!collection_details.is_frozen, Error::::Frozen); + ensure!(!T::Locker::is_locked(collection, item), Error::::Locked); + + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; + ensure!(!details.is_frozen, Error::::Frozen); + with_details(&collection_details, &mut details)?; + + Account::::remove((&details.owner, &collection, &item)); + Account::::insert((&dest, &collection, &item), ()); + let origin = details.owner; + details.owner = dest; + Item::::insert(&collection, &item, &details); + ItemPriceOf::::remove(&collection, &item); + + Self::deposit_event(Event::Transferred { + collection, + item, + from: origin, + to: details.owner, + }); + Ok(()) + } + + pub fn do_create_collection( + collection: T::CollectionId, + owner: T::AccountId, + admin: T::AccountId, + deposit: DepositBalanceOf, + free_holding: bool, + event: Event, + ) -> DispatchResult { + ensure!(!Collection::::contains_key(collection), Error::::InUse); + + T::Currency::reserve(&owner, deposit)?; + + Collection::::insert( + collection, + CollectionDetails { + owner: owner.clone(), + issuer: admin.clone(), + admin: admin.clone(), + freezer: admin, + total_deposit: deposit, + free_holding, + items: 0, + item_metadatas: 0, + attributes: 0, + is_frozen: false, + }, + ); + + CollectionAccount::::insert(&owner, &collection, ()); + Self::deposit_event(event); + Ok(()) + } + + pub fn do_destroy_collection( + collection: T::CollectionId, + witness: DestroyWitness, + maybe_check_owner: 
Option, + ) -> Result { + Collection::::try_mutate_exists(collection, |maybe_details| { + let collection_details = + maybe_details.take().ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = maybe_check_owner { + ensure!(collection_details.owner == check_owner, Error::::NoPermission); + } + ensure!(collection_details.items == witness.items, Error::::BadWitness); + ensure!( + collection_details.item_metadatas == witness.item_metadatas, + Error::::BadWitness + ); + ensure!(collection_details.attributes == witness.attributes, Error::::BadWitness); + + for (item, details) in Item::::drain_prefix(&collection) { + Account::::remove((&details.owner, &collection, &item)); + } + #[allow(deprecated)] + ItemMetadataOf::::remove_prefix(&collection, None); + #[allow(deprecated)] + ItemPriceOf::::remove_prefix(&collection, None); + CollectionMetadataOf::::remove(&collection); + #[allow(deprecated)] + Attribute::::remove_prefix((&collection,), None); + CollectionAccount::::remove(&collection_details.owner, &collection); + T::Currency::unreserve(&collection_details.owner, collection_details.total_deposit); + CollectionMaxSupply::::remove(&collection); + + Self::deposit_event(Event::Destroyed { collection }); + + Ok(DestroyWitness { + items: collection_details.items, + item_metadatas: collection_details.item_metadatas, + attributes: collection_details.attributes, + }) + }) + } + + pub fn do_mint( + collection: T::CollectionId, + item: T::ItemId, + owner: T::AccountId, + with_details: impl FnOnce(&CollectionDetailsFor) -> DispatchResult, + ) -> DispatchResult { + ensure!(!Item::::contains_key(collection, item), Error::::AlreadyExists); + + Collection::::try_mutate( + &collection, + |maybe_collection_details| -> DispatchResult { + let collection_details = + maybe_collection_details.as_mut().ok_or(Error::::UnknownCollection)?; + + with_details(collection_details)?; + + if let Ok(max_supply) = CollectionMaxSupply::::try_get(&collection) { + ensure!(collection_details.items 
< max_supply, Error::::MaxSupplyReached); + } + + let items = + collection_details.items.checked_add(1).ok_or(ArithmeticError::Overflow)?; + collection_details.items = items; + + let deposit = match collection_details.free_holding { + true => Zero::zero(), + false => T::ItemDeposit::get(), + }; + T::Currency::reserve(&collection_details.owner, deposit)?; + collection_details.total_deposit += deposit; + + let owner = owner.clone(); + Account::::insert((&owner, &collection, &item), ()); + let details = ItemDetails { owner, approved: None, is_frozen: false, deposit }; + Item::::insert(&collection, &item, details); + Ok(()) + }, + )?; + + Self::deposit_event(Event::Issued { collection, item, owner }); + Ok(()) + } + + pub fn do_burn( + collection: T::CollectionId, + item: T::ItemId, + with_details: impl FnOnce(&CollectionDetailsFor, &ItemDetailsFor) -> DispatchResult, + ) -> DispatchResult { + let owner = Collection::::try_mutate( + &collection, + |maybe_collection_details| -> Result { + let collection_details = + maybe_collection_details.as_mut().ok_or(Error::::UnknownCollection)?; + let details = Item::::get(&collection, &item) + .ok_or(Error::::UnknownCollection)?; + with_details(collection_details, &details)?; + + // Return the deposit. 
+ T::Currency::unreserve(&collection_details.owner, details.deposit); + collection_details.total_deposit.saturating_reduce(details.deposit); + collection_details.items.saturating_dec(); + Ok(details.owner) + }, + )?; + + Item::::remove(&collection, &item); + Account::::remove((&owner, &collection, &item)); + ItemPriceOf::::remove(&collection, &item); + + Self::deposit_event(Event::Burned { collection, item, owner }); + Ok(()) + } + + pub fn do_set_price( + collection: T::CollectionId, + item: T::ItemId, + sender: T::AccountId, + price: Option>, + whitelisted_buyer: Option, + ) -> DispatchResult { + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + ensure!(details.owner == sender, Error::::NoPermission); + + if let Some(ref price) = price { + ItemPriceOf::::insert(&collection, &item, (price, whitelisted_buyer.clone())); + Self::deposit_event(Event::ItemPriceSet { + collection, + item, + price: *price, + whitelisted_buyer, + }); + } else { + ItemPriceOf::::remove(&collection, &item); + Self::deposit_event(Event::ItemPriceRemoved { collection, item }); + } + + Ok(()) + } + + pub fn do_buy_item( + collection: T::CollectionId, + item: T::ItemId, + buyer: T::AccountId, + bid_price: ItemPrice, + ) -> DispatchResult { + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + ensure!(details.owner != buyer, Error::::NoPermission); + + let price_info = + ItemPriceOf::::get(&collection, &item).ok_or(Error::::NotForSale)?; + + ensure!(bid_price >= price_info.0, Error::::BidTooLow); + + if let Some(only_buyer) = price_info.1 { + ensure!(only_buyer == buyer, Error::::NoPermission); + } + + T::Currency::transfer( + &buyer, + &details.owner, + price_info.0, + ExistenceRequirement::KeepAlive, + )?; + + let old_owner = details.owner.clone(); + + Self::do_transfer(collection, item, buyer.clone(), |_, _| Ok(()))?; + + Self::deposit_event(Event::ItemBought { + collection, + item, + price: price_info.0, + seller: old_owner, + buyer, + 
}); + + Ok(()) + } +} diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs new file mode 100644 index 0000000000000..cead6f562ab58 --- /dev/null +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -0,0 +1,188 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementations for `nonfungibles` traits. + +use super::*; +use frame_support::{ + traits::{tokens::nonfungibles::*, Get}, + BoundedSlice, +}; +use sp_runtime::{DispatchError, DispatchResult}; +use sp_std::prelude::*; + +impl, I: 'static> Inspect<::AccountId> for Pallet { + type ItemId = T::ItemId; + type CollectionId = T::CollectionId; + + fn owner( + collection: &Self::CollectionId, + item: &Self::ItemId, + ) -> Option<::AccountId> { + Item::::get(collection, item).map(|a| a.owner) + } + + fn collection_owner(collection: &Self::CollectionId) -> Option<::AccountId> { + Collection::::get(collection).map(|a| a.owner) + } + + /// Returns the attribute value of `item` of `collection` corresponding to `key`. + /// + /// When `key` is empty, we return the item metadata value. + /// + /// By default this is `None`; no attributes are defined. + fn attribute( + collection: &Self::CollectionId, + item: &Self::ItemId, + key: &[u8], + ) -> Option> { + if key.is_empty() { + // We make the empty key map to the item metadata value. 
+ ItemMetadataOf::::get(collection, item).map(|m| m.data.into()) + } else { + let key = BoundedSlice::<_, _>::try_from(key).ok()?; + Attribute::::get((collection, Some(item), key)).map(|a| a.0.into()) + } + } + + /// Returns the attribute value of `item` of `collection` corresponding to `key`. + /// + /// When `key` is empty, we return the item metadata value. + /// + /// By default this is `None`; no attributes are defined. + fn collection_attribute(collection: &Self::CollectionId, key: &[u8]) -> Option> { + if key.is_empty() { + // We make the empty key map to the item metadata value. + CollectionMetadataOf::::get(collection).map(|m| m.data.into()) + } else { + let key = BoundedSlice::<_, _>::try_from(key).ok()?; + Attribute::::get((collection, Option::::None, key)).map(|a| a.0.into()) + } + } + + /// Returns `true` if the `item` of `collection` may be transferred. + /// + /// Default implementation is that all items are transferable. + fn can_transfer(collection: &Self::CollectionId, item: &Self::ItemId) -> bool { + match (Collection::::get(collection), Item::::get(collection, item)) { + (Some(cd), Some(id)) if !cd.is_frozen && !id.is_frozen => true, + _ => false, + } + } +} + +impl, I: 'static> Create<::AccountId> for Pallet { + /// Create a `collection` of nonfungible items to be owned by `who` and managed by `admin`. 
+ fn create_collection( + collection: &Self::CollectionId, + who: &T::AccountId, + admin: &T::AccountId, + ) -> DispatchResult { + Self::do_create_collection( + *collection, + who.clone(), + admin.clone(), + T::CollectionDeposit::get(), + false, + Event::Created { collection: *collection, creator: who.clone(), owner: admin.clone() }, + ) + } +} + +impl, I: 'static> Destroy<::AccountId> for Pallet { + type DestroyWitness = DestroyWitness; + + fn get_destroy_witness(collection: &Self::CollectionId) -> Option { + Collection::::get(collection).map(|a| a.destroy_witness()) + } + + fn destroy( + collection: Self::CollectionId, + witness: Self::DestroyWitness, + maybe_check_owner: Option, + ) -> Result { + Self::do_destroy_collection(collection, witness, maybe_check_owner) + } +} + +impl, I: 'static> Mutate<::AccountId> for Pallet { + fn mint_into( + collection: &Self::CollectionId, + item: &Self::ItemId, + who: &T::AccountId, + ) -> DispatchResult { + Self::do_mint(*collection, *item, who.clone(), |_| Ok(())) + } + + fn burn( + collection: &Self::CollectionId, + item: &Self::ItemId, + maybe_check_owner: Option<&T::AccountId>, + ) -> DispatchResult { + Self::do_burn(*collection, *item, |_, d| { + if let Some(check_owner) = maybe_check_owner { + if &d.owner != check_owner { + return Err(Error::::NoPermission.into()) + } + } + Ok(()) + }) + } +} + +impl, I: 'static> Transfer for Pallet { + fn transfer( + collection: &Self::CollectionId, + item: &Self::ItemId, + destination: &T::AccountId, + ) -> DispatchResult { + Self::do_transfer(*collection, *item, destination.clone(), |_, _| Ok(())) + } +} + +impl, I: 'static> InspectEnumerable for Pallet { + /// Returns an iterator of the collections in existence. + /// + /// NOTE: iterating this list invokes a storage read per item. + fn collections() -> Box> { + Box::new(CollectionMetadataOf::::iter_keys()) + } + + /// Returns an iterator of the items of a `collection` in existence. 
+ /// + /// NOTE: iterating this list invokes a storage read per item. + fn items(collection: &Self::CollectionId) -> Box> { + Box::new(ItemMetadataOf::::iter_key_prefix(collection)) + } + + /// Returns an iterator of the items of all collections owned by `who`. + /// + /// NOTE: iterating this list invokes a storage read per item. + fn owned(who: &T::AccountId) -> Box> { + Box::new(Account::::iter_key_prefix((who,))) + } + + /// Returns an iterator of the items of `collection` owned by `who`. + /// + /// NOTE: iterating this list invokes a storage read per item. + fn owned_in_collection( + collection: &Self::CollectionId, + who: &T::AccountId, + ) -> Box> { + Box::new(Account::::iter_key_prefix((who, collection))) + } +} diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs new file mode 100644 index 0000000000000..70f10ca4f8b39 --- /dev/null +++ b/frame/nfts/src/lib.rs @@ -0,0 +1,1498 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Unique (Items) Module +//! +//! A simple, secure module for dealing with non-fungible items. +//! +//! ## Related Modules +//! +//! * [`System`](../frame_system/index.html) +//! * [`Support`](../frame_support/index.html) + +#![recursion_limit = "256"] +// Ensure we're `no_std` when compiling for Wasm. 
+#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(test)] +pub mod mock; +#[cfg(test)] +mod tests; + +mod functions; +mod impl_nonfungibles; +mod types; + +pub mod migration; +pub mod weights; + +use codec::{Decode, Encode}; +use frame_support::{ + traits::{ + tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency, + }, + transactional, +}; +use frame_system::Config as SystemConfig; +use sp_runtime::{ + traits::{Saturating, StaticLookup, Zero}, + ArithmeticError, RuntimeDebug, +}; +use sp_std::prelude::*; + +pub use pallet::*; +pub use types::*; +pub use weights::WeightInfo; + +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[cfg(feature = "runtime-benchmarks")] + pub trait BenchmarkHelper { + fn collection(i: u16) -> CollectionId; + fn item(i: u16) -> ItemId; + } + #[cfg(feature = "runtime-benchmarks")] + impl, ItemId: From> BenchmarkHelper for () { + fn collection(i: u16) -> CollectionId { + i.into() + } + fn item(i: u16) -> ItemId { + i.into() + } + } + + #[pallet::config] + /// The module configuration trait. + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + + /// Identifier for the collection of item. + type CollectionId: Member + Parameter + MaxEncodedLen + Copy; + + /// The type used to identify a unique item within a collection. + type ItemId: Member + Parameter + MaxEncodedLen + Copy; + + /// The currency mechanism, used for paying for reserves. + type Currency: ReservableCurrency; + + /// The origin which may forcibly create or destroy an item or otherwise alter privileged + /// attributes. 
+ type ForceOrigin: EnsureOrigin; + + /// Standard collection creation is only allowed if the origin attempting it and the + /// collection are in this set. + type CreateOrigin: EnsureOriginWithArg< + Success = Self::AccountId, + Self::Origin, + Self::CollectionId, + >; + + /// Locker trait to enable Locking mechanism downstream. + type Locker: Locker; + + /// The basic amount of funds that must be reserved for collection. + #[pallet::constant] + type CollectionDeposit: Get>; + + /// The basic amount of funds that must be reserved for an item. + #[pallet::constant] + type ItemDeposit: Get>; + + /// The basic amount of funds that must be reserved when adding metadata to your item. + #[pallet::constant] + type MetadataDepositBase: Get>; + + /// The basic amount of funds that must be reserved when adding an attribute to an item. + #[pallet::constant] + type AttributeDepositBase: Get>; + + /// The additional funds that must be reserved for the number of bytes store in metadata, + /// either "normal" metadata or attribute metadata. + #[pallet::constant] + type DepositPerByte: Get>; + + /// The maximum length of data stored on-chain. + #[pallet::constant] + type StringLimit: Get; + + /// The maximum length of an attribute key. + #[pallet::constant] + type KeyLimit: Get; + + /// The maximum length of an attribute value. + #[pallet::constant] + type ValueLimit: Get; + + #[cfg(feature = "runtime-benchmarks")] + /// A set of helper functions for benchmarking. + type Helper: BenchmarkHelper; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::storage] + #[pallet::storage_prefix = "Class"] + /// Details of a collection. + pub(super) type Collection, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::CollectionId, + CollectionDetails>, + >; + + #[pallet::storage] + /// The collection, if any, of which an account is willing to take ownership. 
+ pub(super) type OwnershipAcceptance, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, T::AccountId, T::CollectionId>; + + #[pallet::storage] + /// The items held by any given account; set out this way so that items owned by a single + /// account can be enumerated. + pub(super) type Account, I: 'static = ()> = StorageNMap< + _, + ( + NMapKey, // owner + NMapKey, + NMapKey, + ), + (), + OptionQuery, + >; + + #[pallet::storage] + #[pallet::storage_prefix = "ClassAccount"] + /// The collections owned by any given account; set out this way so that collections owned by + /// a single account can be enumerated. + pub(super) type CollectionAccount, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::AccountId, + Blake2_128Concat, + T::CollectionId, + (), + OptionQuery, + >; + + #[pallet::storage] + #[pallet::storage_prefix = "Asset"] + /// The items in existence and their ownership details. + pub(super) type Item, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + ItemDetails>, + OptionQuery, + >; + + #[pallet::storage] + #[pallet::storage_prefix = "ClassMetadataOf"] + /// Metadata of a collection. + pub(super) type CollectionMetadataOf, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::CollectionId, + CollectionMetadata, T::StringLimit>, + OptionQuery, + >; + + #[pallet::storage] + #[pallet::storage_prefix = "InstanceMetadataOf"] + /// Metadata of an item. + pub(super) type ItemMetadataOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + ItemMetadata, T::StringLimit>, + OptionQuery, + >; + + #[pallet::storage] + /// Attributes of a collection. + pub(super) type Attribute, I: 'static = ()> = StorageNMap< + _, + ( + NMapKey, + NMapKey>, + NMapKey>, + ), + (BoundedVec, DepositBalanceOf), + OptionQuery, + >; + + #[pallet::storage] + /// Price of an asset instance. 
+ pub(super) type ItemPriceOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + (ItemPrice, Option), + OptionQuery, + >; + + #[pallet::storage] + /// Keeps track of the number of items a collection might have. + pub(super) type CollectionMaxSupply, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, T::CollectionId, u32, OptionQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { + /// A `collection` was created. + Created { collection: T::CollectionId, creator: T::AccountId, owner: T::AccountId }, + /// A `collection` was force-created. + ForceCreated { collection: T::CollectionId, owner: T::AccountId }, + /// A `collection` was destroyed. + Destroyed { collection: T::CollectionId }, + /// An `item` was issued. + Issued { collection: T::CollectionId, item: T::ItemId, owner: T::AccountId }, + /// An `item` was transferred. + Transferred { + collection: T::CollectionId, + item: T::ItemId, + from: T::AccountId, + to: T::AccountId, + }, + /// An `item` was destroyed. + Burned { collection: T::CollectionId, item: T::ItemId, owner: T::AccountId }, + /// Some `item` was frozen. + Frozen { collection: T::CollectionId, item: T::ItemId }, + /// Some `item` was thawed. + Thawed { collection: T::CollectionId, item: T::ItemId }, + /// Some `collection` was frozen. + CollectionFrozen { collection: T::CollectionId }, + /// Some `collection` was thawed. + CollectionThawed { collection: T::CollectionId }, + /// The owner changed. + OwnerChanged { collection: T::CollectionId, new_owner: T::AccountId }, + /// The management team changed. + TeamChanged { + collection: T::CollectionId, + issuer: T::AccountId, + admin: T::AccountId, + freezer: T::AccountId, + }, + /// An `item` of a `collection` has been approved by the `owner` for transfer by + /// a `delegate`. 
+ ApprovedTransfer { + collection: T::CollectionId, + item: T::ItemId, + owner: T::AccountId, + delegate: T::AccountId, + }, + /// An approval for a `delegate` account to transfer the `item` of an item + /// `collection` was cancelled by its `owner`. + ApprovalCancelled { + collection: T::CollectionId, + item: T::ItemId, + owner: T::AccountId, + delegate: T::AccountId, + }, + /// A `collection` has had its attributes changed by the `Force` origin. + ItemStatusChanged { collection: T::CollectionId }, + /// New metadata has been set for a `collection`. + CollectionMetadataSet { + collection: T::CollectionId, + data: BoundedVec, + is_frozen: bool, + }, + /// Metadata has been cleared for a `collection`. + CollectionMetadataCleared { collection: T::CollectionId }, + /// New metadata has been set for an item. + MetadataSet { + collection: T::CollectionId, + item: T::ItemId, + data: BoundedVec, + is_frozen: bool, + }, + /// Metadata has been cleared for an item. + MetadataCleared { collection: T::CollectionId, item: T::ItemId }, + /// Metadata has been cleared for an item. + Redeposited { collection: T::CollectionId, successful_items: Vec }, + /// New attribute metadata has been set for a `collection` or `item`. + AttributeSet { + collection: T::CollectionId, + maybe_item: Option, + key: BoundedVec, + value: BoundedVec, + }, + /// Attribute metadata has been cleared for a `collection` or `item`. + AttributeCleared { + collection: T::CollectionId, + maybe_item: Option, + key: BoundedVec, + }, + /// Ownership acceptance has changed for an account. + OwnershipAcceptanceChanged { who: T::AccountId, maybe_collection: Option }, + /// Max supply has been set for a collection. + CollectionMaxSupplySet { collection: T::CollectionId, max_supply: u32 }, + /// The price was set for the instance. + ItemPriceSet { + collection: T::CollectionId, + item: T::ItemId, + price: ItemPrice, + whitelisted_buyer: Option, + }, + /// The price for the instance was removed. 
+ ItemPriceRemoved { collection: T::CollectionId, item: T::ItemId }, + /// An item was bought. + ItemBought { + collection: T::CollectionId, + item: T::ItemId, + price: ItemPrice, + seller: T::AccountId, + buyer: T::AccountId, + }, + } + + #[pallet::error] + pub enum Error { + /// The signing account has no permission to do the operation. + NoPermission, + /// The given item ID is unknown. + UnknownCollection, + /// The item ID has already been used for an item. + AlreadyExists, + /// The owner turned out to be different to what was expected. + WrongOwner, + /// Invalid witness data given. + BadWitness, + /// The item ID is already taken. + InUse, + /// The item or collection is frozen. + Frozen, + /// The delegate turned out to be different to what was expected. + WrongDelegate, + /// There is no delegate approved. + NoDelegate, + /// No approval exists that would allow the transfer. + Unapproved, + /// The named owner has not signed ownership of the collection is acceptable. + Unaccepted, + /// The item is locked. + Locked, + /// All items have been minted. + MaxSupplyReached, + /// The max supply has already been set. + MaxSupplyAlreadySet, + /// The provided max supply is less to the amount of items a collection already has. + MaxSupplyTooSmall, + /// The given item ID is unknown. + UnknownItem, + /// Item is not for sale. + NotForSale, + /// The provided bid is too low. + BidTooLow, + } + + impl, I: 'static> Pallet { + /// Get the owner of the item, if the item exists. + pub fn owner(collection: T::CollectionId, item: T::ItemId) -> Option { + Item::::get(collection, item).map(|i| i.owner) + } + + /// Get the owner of the item, if the item exists. + pub fn collection_owner(collection: T::CollectionId) -> Option { + Collection::::get(collection).map(|i| i.owner) + } + } + + #[pallet::call] + impl, I: 'static> Pallet { + /// Issue a new collection of non-fungible items from a public origin. 
+ /// + /// This new collection has no items initially and its owner is the origin. + /// + /// The origin must be Signed and the sender must have sufficient funds free. + /// + /// `ItemDeposit` funds of sender are reserved. + /// + /// Parameters: + /// - `collection`: The identifier of the new collection. This must not be currently in use. + /// - `admin`: The admin of this collection. The admin is the initial address of each + /// member of the collection's admin team. + /// + /// Emits `Created` event when successful. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::create())] + pub fn create( + origin: OriginFor, + collection: T::CollectionId, + admin: AccountIdLookupOf, + ) -> DispatchResult { + let owner = T::CreateOrigin::ensure_origin(origin, &collection)?; + let admin = T::Lookup::lookup(admin)?; + + Self::do_create_collection( + collection, + owner.clone(), + admin.clone(), + T::CollectionDeposit::get(), + false, + Event::Created { collection, creator: owner, owner: admin }, + ) + } + + /// Issue a new collection of non-fungible items from a privileged origin. + /// + /// This new collection has no items initially. + /// + /// The origin must conform to `ForceOrigin`. + /// + /// Unlike `create`, no funds are reserved. + /// + /// - `collection`: The identifier of the new item. This must not be currently in use. + /// - `owner`: The owner of this collection of items. The owner has full superuser + /// permissions + /// over this item, but may later change and configure the permissions using + /// `transfer_ownership` and `set_team`. + /// + /// Emits `ForceCreated` event when successful. 
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_create())] + pub fn force_create( + origin: OriginFor, + collection: T::CollectionId, + owner: AccountIdLookupOf, + free_holding: bool, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + let owner = T::Lookup::lookup(owner)?; + + Self::do_create_collection( + collection, + owner.clone(), + owner.clone(), + Zero::zero(), + free_holding, + Event::ForceCreated { collection, owner }, + ) + } + + /// Destroy a collection of fungible items. + /// + /// The origin must conform to `ForceOrigin` or must be `Signed` and the sender must be the + /// owner of the `collection`. + /// + /// - `collection`: The identifier of the collection to be destroyed. + /// - `witness`: Information on the items minted in the collection. This must be + /// correct. + /// + /// Emits `Destroyed` event when successful. + /// + /// Weight: `O(n + m)` where: + /// - `n = witness.items` + /// - `m = witness.item_metadatas` + /// - `a = witness.attributes` + #[pallet::weight(T::WeightInfo::destroy( + witness.items, + witness.item_metadatas, + witness.attributes, + ))] + pub fn destroy( + origin: OriginFor, + collection: T::CollectionId, + witness: DestroyWitness, + ) -> DispatchResultWithPostInfo { + let maybe_check_owner = match T::ForceOrigin::try_origin(origin) { + Ok(_) => None, + Err(origin) => Some(ensure_signed(origin)?), + }; + let details = Self::do_destroy_collection(collection, witness, maybe_check_owner)?; + + Ok(Some(T::WeightInfo::destroy( + details.items, + details.item_metadatas, + details.attributes, + )) + .into()) + } + + /// Mint an item of a particular collection. + /// + /// The origin must be Signed and the sender must be the Issuer of the `collection`. + /// + /// - `collection`: The collection of the item to be minted. + /// - `item`: The item value of the item to be minted. + /// - `beneficiary`: The initial owner of the minted item. + /// + /// Emits `Issued` event when successful. 
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::mint())] + pub fn mint( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + owner: AccountIdLookupOf, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let owner = T::Lookup::lookup(owner)?; + + Self::do_mint(collection, item, owner, |collection_details| { + ensure!(collection_details.issuer == origin, Error::::NoPermission); + Ok(()) + }) + } + + /// Destroy a single item. + /// + /// Origin must be Signed and the sender should be the Admin of the `collection`. + /// + /// - `collection`: The collection of the item to be burned. + /// - `item`: The item of the item to be burned. + /// - `check_owner`: If `Some` then the operation will fail with `WrongOwner` unless the + /// item is owned by this value. + /// + /// Emits `Burned` with the actual amount burned. + /// + /// Weight: `O(1)` + /// Modes: `check_owner.is_some()`. + #[pallet::weight(T::WeightInfo::burn())] + pub fn burn( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + check_owner: Option>, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let check_owner = check_owner.map(T::Lookup::lookup).transpose()?; + + Self::do_burn(collection, item, |collection_details, details| { + let is_permitted = collection_details.admin == origin || details.owner == origin; + ensure!(is_permitted, Error::::NoPermission); + ensure!( + check_owner.map_or(true, |o| o == details.owner), + Error::::WrongOwner + ); + Ok(()) + }) + } + + /// Move an item from the sender account to another. + /// + /// Origin must be Signed and the signing account must be either: + /// - the Admin of the `collection`; + /// - the Owner of the `item`; + /// - the approved delegate for the `item` (in this case, the approval is reset). + /// + /// Arguments: + /// - `collection`: The collection of the item to be transferred. + /// - `item`: The item of the item to be transferred. 
+ /// - `dest`: The account to receive ownership of the item. + /// + /// Emits `Transferred`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::transfer())] + pub fn transfer( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + dest: AccountIdLookupOf, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + + Self::do_transfer(collection, item, dest, |collection_details, details| { + if details.owner != origin && collection_details.admin != origin { + let approved = details.approved.take().map_or(false, |i| i == origin); + ensure!(approved, Error::::NoPermission); + } + Ok(()) + }) + } + + /// Reevaluate the deposits on some items. + /// + /// Origin must be Signed and the sender should be the Owner of the `collection`. + /// + /// - `collection`: The collection to be frozen. + /// - `items`: The items of the collection whose deposits will be reevaluated. + /// + /// NOTE: This exists as a best-effort function. Any items which are unknown or + /// in the case that the owner account does not have reservable funds to pay for a + /// deposit increase are ignored. Generally the owner isn't going to call this on items + /// whose existing deposit is less than the refreshed deposit as it would only cost them, + /// so it's of little consequence. + /// + /// It will still return an error in the case that the collection is unknown of the signer + /// is not permitted to call it. 
+ /// + /// Weight: `O(items.len())` + #[pallet::weight(T::WeightInfo::redeposit(items.len() as u32))] + pub fn redeposit( + origin: OriginFor, + collection: T::CollectionId, + items: Vec, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + ensure!(collection_details.owner == origin, Error::::NoPermission); + let deposit = match collection_details.free_holding { + true => Zero::zero(), + false => T::ItemDeposit::get(), + }; + + let mut successful = Vec::with_capacity(items.len()); + for item in items.into_iter() { + let mut details = match Item::::get(&collection, &item) { + Some(x) => x, + None => continue, + }; + let old = details.deposit; + if old > deposit { + T::Currency::unreserve(&collection_details.owner, old - deposit); + } else if deposit > old { + if T::Currency::reserve(&collection_details.owner, deposit - old).is_err() { + // NOTE: No alterations made to collection_details in this iteration so far, + // so this is OK to do. + continue + } + } else { + continue + } + collection_details.total_deposit.saturating_accrue(deposit); + collection_details.total_deposit.saturating_reduce(old); + details.deposit = deposit; + Item::::insert(&collection, &item, &details); + successful.push(item); + } + Collection::::insert(&collection, &collection_details); + + Self::deposit_event(Event::::Redeposited { + collection, + successful_items: successful, + }); + + Ok(()) + } + + /// Disallow further unprivileged transfer of an item. + /// + /// Origin must be Signed and the sender should be the Freezer of the `collection`. + /// + /// - `collection`: The collection of the item to be frozen. + /// - `item`: The item of the item to be frozen. + /// + /// Emits `Frozen`. 
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::freeze())] + pub fn freeze( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; + let collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + ensure!(collection_details.freezer == origin, Error::::NoPermission); + + details.is_frozen = true; + Item::::insert(&collection, &item, &details); + + Self::deposit_event(Event::::Frozen { collection, item }); + Ok(()) + } + + /// Re-allow unprivileged transfer of an item. + /// + /// Origin must be Signed and the sender should be the Freezer of the `collection`. + /// + /// - `collection`: The collection of the item to be thawed. + /// - `item`: The item of the item to be thawed. + /// + /// Emits `Thawed`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::thaw())] + pub fn thaw( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; + let collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + ensure!(collection_details.admin == origin, Error::::NoPermission); + + details.is_frozen = false; + Item::::insert(&collection, &item, &details); + + Self::deposit_event(Event::::Thawed { collection, item }); + Ok(()) + } + + /// Disallow further unprivileged transfers for a whole collection. + /// + /// Origin must be Signed and the sender should be the Freezer of the `collection`. + /// + /// - `collection`: The collection to be frozen. + /// + /// Emits `CollectionFrozen`. 
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::freeze_collection())] + pub fn freeze_collection( + origin: OriginFor, + collection: T::CollectionId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + Collection::::try_mutate(collection, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; + ensure!(origin == details.freezer, Error::::NoPermission); + + details.is_frozen = true; + + Self::deposit_event(Event::::CollectionFrozen { collection }); + Ok(()) + }) + } + + /// Re-allow unprivileged transfers for a whole collection. + /// + /// Origin must be Signed and the sender should be the Admin of the `collection`. + /// + /// - `collection`: The collection to be thawed. + /// + /// Emits `CollectionThawed`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::thaw_collection())] + pub fn thaw_collection( + origin: OriginFor, + collection: T::CollectionId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + Collection::::try_mutate(collection, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; + ensure!(origin == details.admin, Error::::NoPermission); + + details.is_frozen = false; + + Self::deposit_event(Event::::CollectionThawed { collection }); + Ok(()) + }) + } + + /// Change the Owner of a collection. + /// + /// Origin must be Signed and the sender should be the Owner of the `collection`. + /// + /// - `collection`: The collection whose owner should be changed. + /// - `owner`: The new Owner of this collection. They must have called + /// `set_accept_ownership` with `collection` in order for this operation to succeed. + /// + /// Emits `OwnerChanged`. 
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::transfer_ownership())] + pub fn transfer_ownership( + origin: OriginFor, + collection: T::CollectionId, + owner: AccountIdLookupOf, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let owner = T::Lookup::lookup(owner)?; + + let acceptable_collection = OwnershipAcceptance::::get(&owner); + ensure!(acceptable_collection.as_ref() == Some(&collection), Error::::Unaccepted); + + Collection::::try_mutate(collection, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; + ensure!(origin == details.owner, Error::::NoPermission); + if details.owner == owner { + return Ok(()) + } + + // Move the deposit to the new owner. + T::Currency::repatriate_reserved( + &details.owner, + &owner, + details.total_deposit, + Reserved, + )?; + CollectionAccount::::remove(&details.owner, &collection); + CollectionAccount::::insert(&owner, &collection, ()); + details.owner = owner.clone(); + OwnershipAcceptance::::remove(&owner); + + Self::deposit_event(Event::OwnerChanged { collection, new_owner: owner }); + Ok(()) + }) + } + + /// Change the Issuer, Admin and Freezer of a collection. + /// + /// Origin must be Signed and the sender should be the Owner of the `collection`. + /// + /// - `collection`: The collection whose team should be changed. + /// - `issuer`: The new Issuer of this collection. + /// - `admin`: The new Admin of this collection. + /// - `freezer`: The new Freezer of this collection. + /// + /// Emits `TeamChanged`. 
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::set_team())] + pub fn set_team( + origin: OriginFor, + collection: T::CollectionId, + issuer: AccountIdLookupOf, + admin: AccountIdLookupOf, + freezer: AccountIdLookupOf, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let issuer = T::Lookup::lookup(issuer)?; + let admin = T::Lookup::lookup(admin)?; + let freezer = T::Lookup::lookup(freezer)?; + + Collection::::try_mutate(collection, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; + ensure!(origin == details.owner, Error::::NoPermission); + + details.issuer = issuer.clone(); + details.admin = admin.clone(); + details.freezer = freezer.clone(); + + Self::deposit_event(Event::TeamChanged { collection, issuer, admin, freezer }); + Ok(()) + }) + } + + /// Approve an item to be transferred by a delegated third-party account. + /// + /// Origin must be Signed and must be the owner of the `item`. + /// + /// - `collection`: The collection of the item to be approved for delegated transfer. + /// - `item`: The item of the item to be approved for delegated transfer. + /// - `delegate`: The account to delegate permission to transfer the item. + /// + /// Emits `ApprovedTransfer` on success. 
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::approve_transfer())] + pub fn approve_transfer( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + delegate: AccountIdLookupOf, + ) -> DispatchResult { + let maybe_check: Option = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + + let delegate = T::Lookup::lookup(delegate)?; + + let collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; + + if let Some(check) = maybe_check { + let permitted = check == collection_details.admin || check == details.owner; + ensure!(permitted, Error::::NoPermission); + } + + details.approved = Some(delegate); + Item::::insert(&collection, &item, &details); + + let delegate = details.approved.expect("set as Some above; qed"); + Self::deposit_event(Event::ApprovedTransfer { + collection, + item, + owner: details.owner, + delegate, + }); + + Ok(()) + } + + /// Cancel the prior approval for the transfer of an item by a delegate. + /// + /// Origin must be either: + /// - the `Force` origin; + /// - `Signed` with the signer being the Admin of the `collection`; + /// - `Signed` with the signer being the Owner of the `item`; + /// + /// Arguments: + /// - `collection`: The collection of the item of whose approval will be cancelled. + /// - `item`: The item of the item of whose approval will be cancelled. + /// - `maybe_check_delegate`: If `Some` will ensure that the given account is the one to + /// which permission of transfer is delegated. + /// + /// Emits `ApprovalCancelled` on success. 
+ ///
+ /// Weight: `O(1)`
+ #[pallet::weight(T::WeightInfo::cancel_approval())]
+ pub fn cancel_approval(
+ origin: OriginFor,
+ collection: T::CollectionId,
+ item: T::ItemId,
+ maybe_check_delegate: Option>,
+ ) -> DispatchResult {
+ let maybe_check: Option = T::ForceOrigin::try_origin(origin)
+ .map(|_| None)
+ .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?;
+
+ let collection_details =
+ Collection::::get(&collection).ok_or(Error::::UnknownCollection)?;
+ let mut details =
+ Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?;
+ if let Some(check) = maybe_check {
+ let permitted = check == collection_details.admin || check == details.owner;
+ ensure!(permitted, Error::::NoPermission);
+ }
+ let maybe_check_delegate = maybe_check_delegate.map(T::Lookup::lookup).transpose()?;
+ let old = details.approved.take().ok_or(Error::::NoDelegate)?;
+ if let Some(check_delegate) = maybe_check_delegate {
+ ensure!(check_delegate == old, Error::::WrongDelegate);
+ }
+
+ Item::::insert(&collection, &item, &details);
+ Self::deposit_event(Event::ApprovalCancelled {
+ collection,
+ item,
+ owner: details.owner,
+ delegate: old,
+ });
+
+ Ok(())
+ }
+
+ /// Alter the attributes of a given collection.
+ ///
+ /// Origin must be `ForceOrigin`.
+ ///
+ /// - `collection`: The identifier of the collection.
+ /// - `owner`: The new Owner of this collection.
+ /// - `issuer`: The new Issuer of this collection.
+ /// - `admin`: The new Admin of this collection.
+ /// - `freezer`: The new Freezer of this collection.
+ /// - `free_holding`: Whether a deposit is taken for holding an item of this collection.
+ /// - `is_frozen`: Whether this collection is frozen except for permissioned/admin
+ /// instructions.
+ ///
+ /// Emits `ItemStatusChanged` with the identity of the collection.
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_item_status())] + pub fn force_item_status( + origin: OriginFor, + collection: T::CollectionId, + owner: AccountIdLookupOf, + issuer: AccountIdLookupOf, + admin: AccountIdLookupOf, + freezer: AccountIdLookupOf, + free_holding: bool, + is_frozen: bool, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + + Collection::::try_mutate(collection, |maybe_item| { + let mut item = maybe_item.take().ok_or(Error::::UnknownCollection)?; + let old_owner = item.owner; + let new_owner = T::Lookup::lookup(owner)?; + item.owner = new_owner.clone(); + item.issuer = T::Lookup::lookup(issuer)?; + item.admin = T::Lookup::lookup(admin)?; + item.freezer = T::Lookup::lookup(freezer)?; + item.free_holding = free_holding; + item.is_frozen = is_frozen; + *maybe_item = Some(item); + CollectionAccount::::remove(&old_owner, &collection); + CollectionAccount::::insert(&new_owner, &collection, ()); + + Self::deposit_event(Event::ItemStatusChanged { collection }); + Ok(()) + }) + } + + /// Set an attribute for a collection or item. + /// + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// `collection`. + /// + /// If the origin is Signed, then funds of signer are reserved according to the formula: + /// `MetadataDepositBase + DepositPerByte * (key.len + value.len)` taking into + /// account any already reserved funds. + /// + /// - `collection`: The identifier of the collection whose item's metadata to set. + /// - `maybe_item`: The identifier of the item whose metadata to set. + /// - `key`: The key of the attribute. + /// - `value`: The value to which to set the attribute. + /// + /// Emits `AttributeSet`. 
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::set_attribute())] + pub fn set_attribute( + origin: OriginFor, + collection: T::CollectionId, + maybe_item: Option, + key: BoundedVec, + value: BoundedVec, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some))?; + + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + } + let maybe_is_frozen = match maybe_item { + None => CollectionMetadataOf::::get(collection).map(|v| v.is_frozen), + Some(item) => ItemMetadataOf::::get(collection, item).map(|v| v.is_frozen), + }; + ensure!(!maybe_is_frozen.unwrap_or(false), Error::::Frozen); + + let attribute = Attribute::::get((collection, maybe_item, &key)); + if attribute.is_none() { + collection_details.attributes.saturating_inc(); + } + let old_deposit = attribute.map_or(Zero::zero(), |m| m.1); + collection_details.total_deposit.saturating_reduce(old_deposit); + let mut deposit = Zero::zero(); + if !collection_details.free_holding && maybe_check_owner.is_some() { + deposit = T::DepositPerByte::get() + .saturating_mul(((key.len() + value.len()) as u32).into()) + .saturating_add(T::AttributeDepositBase::get()); + } + collection_details.total_deposit.saturating_accrue(deposit); + if deposit > old_deposit { + T::Currency::reserve(&collection_details.owner, deposit - old_deposit)?; + } else if deposit < old_deposit { + T::Currency::unreserve(&collection_details.owner, old_deposit - deposit); + } + + Attribute::::insert((&collection, maybe_item, &key), (&value, deposit)); + Collection::::insert(collection, &collection_details); + Self::deposit_event(Event::AttributeSet { collection, maybe_item, key, value }); + Ok(()) + } + + /// Clear an attribute for a collection or item. 
+ /// + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// `collection`. + /// + /// Any deposit is freed for the collection's owner. + /// + /// - `collection`: The identifier of the collection whose item's metadata to clear. + /// - `maybe_item`: The identifier of the item whose metadata to clear. + /// - `key`: The key of the attribute. + /// + /// Emits `AttributeCleared`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::clear_attribute())] + pub fn clear_attribute( + origin: OriginFor, + collection: T::CollectionId, + maybe_item: Option, + key: BoundedVec, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some))?; + + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + } + let maybe_is_frozen = match maybe_item { + None => CollectionMetadataOf::::get(collection).map(|v| v.is_frozen), + Some(item) => ItemMetadataOf::::get(collection, item).map(|v| v.is_frozen), + }; + ensure!(!maybe_is_frozen.unwrap_or(false), Error::::Frozen); + + if let Some((_, deposit)) = Attribute::::take((collection, maybe_item, &key)) { + collection_details.attributes.saturating_dec(); + collection_details.total_deposit.saturating_reduce(deposit); + T::Currency::unreserve(&collection_details.owner, deposit); + Collection::::insert(collection, &collection_details); + Self::deposit_event(Event::AttributeCleared { collection, maybe_item, key }); + } + Ok(()) + } + + /// Set the metadata for an item. + /// + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// `collection`. 
+ /// + /// If the origin is Signed, then funds of signer are reserved according to the formula: + /// `MetadataDepositBase + DepositPerByte * data.len` taking into + /// account any already reserved funds. + /// + /// - `collection`: The identifier of the collection whose item's metadata to set. + /// - `item`: The identifier of the item whose metadata to set. + /// - `data`: The general information of this item. Limited in length by `StringLimit`. + /// - `is_frozen`: Whether the metadata should be frozen against further changes. + /// + /// Emits `MetadataSet`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::set_metadata())] + pub fn set_metadata( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + data: BoundedVec, + is_frozen: bool, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some))?; + + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + } + + ItemMetadataOf::::try_mutate_exists(collection, item, |metadata| { + let was_frozen = metadata.as_ref().map_or(false, |m| m.is_frozen); + ensure!(maybe_check_owner.is_none() || !was_frozen, Error::::Frozen); + + if metadata.is_none() { + collection_details.item_metadatas.saturating_inc(); + } + let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); + collection_details.total_deposit.saturating_reduce(old_deposit); + let mut deposit = Zero::zero(); + if !collection_details.free_holding && maybe_check_owner.is_some() { + deposit = T::DepositPerByte::get() + .saturating_mul(((data.len()) as u32).into()) + .saturating_add(T::MetadataDepositBase::get()); + } + if deposit > old_deposit { + T::Currency::reserve(&collection_details.owner, deposit - old_deposit)?; + } else if deposit < old_deposit { + 
T::Currency::unreserve(&collection_details.owner, old_deposit - deposit); + } + collection_details.total_deposit.saturating_accrue(deposit); + + *metadata = Some(ItemMetadata { deposit, data: data.clone(), is_frozen }); + + Collection::::insert(&collection, &collection_details); + Self::deposit_event(Event::MetadataSet { collection, item, data, is_frozen }); + Ok(()) + }) + } + + /// Clear the metadata for an item. + /// + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// `item`. + /// + /// Any deposit is freed for the collection's owner. + /// + /// - `collection`: The identifier of the collection whose item's metadata to clear. + /// - `item`: The identifier of the item whose metadata to clear. + /// + /// Emits `MetadataCleared`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::clear_metadata())] + pub fn clear_metadata( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some))?; + + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + } + + ItemMetadataOf::::try_mutate_exists(collection, item, |metadata| { + let was_frozen = metadata.as_ref().map_or(false, |m| m.is_frozen); + ensure!(maybe_check_owner.is_none() || !was_frozen, Error::::Frozen); + + if metadata.is_some() { + collection_details.item_metadatas.saturating_dec(); + } + let deposit = metadata.take().ok_or(Error::::UnknownCollection)?.deposit; + T::Currency::unreserve(&collection_details.owner, deposit); + collection_details.total_deposit.saturating_reduce(deposit); + + Collection::::insert(&collection, &collection_details); + Self::deposit_event(Event::MetadataCleared { collection, item }); + Ok(()) + }) + 
} + + /// Set the metadata for a collection. + /// + /// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of + /// the `collection`. + /// + /// If the origin is `Signed`, then funds of signer are reserved according to the formula: + /// `MetadataDepositBase + DepositPerByte * data.len` taking into + /// account any already reserved funds. + /// + /// - `collection`: The identifier of the item whose metadata to update. + /// - `data`: The general information of this item. Limited in length by `StringLimit`. + /// - `is_frozen`: Whether the metadata should be frozen against further changes. + /// + /// Emits `CollectionMetadataSet`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::set_collection_metadata())] + pub fn set_collection_metadata( + origin: OriginFor, + collection: T::CollectionId, + data: BoundedVec, + is_frozen: bool, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some))?; + + let mut details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &details.owner, Error::::NoPermission); + } + + CollectionMetadataOf::::try_mutate_exists(collection, |metadata| { + let was_frozen = metadata.as_ref().map_or(false, |m| m.is_frozen); + ensure!(maybe_check_owner.is_none() || !was_frozen, Error::::Frozen); + + let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); + details.total_deposit.saturating_reduce(old_deposit); + let mut deposit = Zero::zero(); + if maybe_check_owner.is_some() && !details.free_holding { + deposit = T::DepositPerByte::get() + .saturating_mul(((data.len()) as u32).into()) + .saturating_add(T::MetadataDepositBase::get()); + } + if deposit > old_deposit { + T::Currency::reserve(&details.owner, deposit - old_deposit)?; + } else if deposit < old_deposit { + 
T::Currency::unreserve(&details.owner, old_deposit - deposit); + } + details.total_deposit.saturating_accrue(deposit); + + Collection::::insert(&collection, details); + + *metadata = Some(CollectionMetadata { deposit, data: data.clone(), is_frozen }); + + Self::deposit_event(Event::CollectionMetadataSet { collection, data, is_frozen }); + Ok(()) + }) + } + + /// Clear the metadata for a collection. + /// + /// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of + /// the `collection`. + /// + /// Any deposit is freed for the collection's owner. + /// + /// - `collection`: The identifier of the collection whose metadata to clear. + /// + /// Emits `CollectionMetadataCleared`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::clear_collection_metadata())] + pub fn clear_collection_metadata( + origin: OriginFor, + collection: T::CollectionId, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some))?; + + let details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &details.owner, Error::::NoPermission); + } + + CollectionMetadataOf::::try_mutate_exists(collection, |metadata| { + let was_frozen = metadata.as_ref().map_or(false, |m| m.is_frozen); + ensure!(maybe_check_owner.is_none() || !was_frozen, Error::::Frozen); + + let deposit = metadata.take().ok_or(Error::::UnknownCollection)?.deposit; + T::Currency::unreserve(&details.owner, deposit); + Self::deposit_event(Event::CollectionMetadataCleared { collection }); + Ok(()) + }) + } + + /// Set (or reset) the acceptance of ownership for a particular account. + /// + /// Origin must be `Signed` and if `maybe_collection` is `Some`, then the signer must have a + /// provider reference. 
+ /// + /// - `maybe_collection`: The identifier of the collection whose ownership the signer is + /// willing to accept, or if `None`, an indication that the signer is willing to accept no + /// ownership transferal. + /// + /// Emits `OwnershipAcceptanceChanged`. + #[pallet::weight(T::WeightInfo::set_accept_ownership())] + pub fn set_accept_ownership( + origin: OriginFor, + maybe_collection: Option, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let old = OwnershipAcceptance::::get(&who); + match (old.is_some(), maybe_collection.is_some()) { + (false, true) => { + frame_system::Pallet::::inc_consumers(&who)?; + }, + (true, false) => { + frame_system::Pallet::::dec_consumers(&who); + }, + _ => {}, + } + if let Some(collection) = maybe_collection.as_ref() { + OwnershipAcceptance::::insert(&who, collection); + } else { + OwnershipAcceptance::::remove(&who); + } + Self::deposit_event(Event::OwnershipAcceptanceChanged { who, maybe_collection }); + Ok(()) + } + + /// Set the maximum amount of items a collection could have. + /// + /// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of + /// the `collection`. + /// + /// Note: This function can only succeed once per collection. + /// + /// - `collection`: The identifier of the collection to change. + /// - `max_supply`: The maximum amount of items a collection could have. + /// + /// Emits `CollectionMaxSupplySet` event when successful. 
+ #[pallet::weight(T::WeightInfo::set_collection_max_supply())] + pub fn set_collection_max_supply( + origin: OriginFor, + collection: T::CollectionId, + max_supply: u32, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some))?; + + ensure!( + !CollectionMaxSupply::::contains_key(&collection), + Error::::MaxSupplyAlreadySet + ); + + let details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &details.owner, Error::::NoPermission); + } + + ensure!(details.items <= max_supply, Error::::MaxSupplyTooSmall); + + CollectionMaxSupply::::insert(&collection, max_supply); + Self::deposit_event(Event::CollectionMaxSupplySet { collection, max_supply }); + Ok(()) + } + + /// Set (or reset) the price for an item. + /// + /// Origin must be Signed and must be the owner of the asset `item`. + /// + /// - `collection`: The collection of the item. + /// - `item`: The item to set the price for. + /// - `price`: The price for the item. Pass `None`, to reset the price. + /// - `buyer`: Restricts the buy operation to a specific account. + /// + /// Emits `ItemPriceSet` on success if the price is not `None`. + /// Emits `ItemPriceRemoved` on success if the price is `None`. + #[pallet::weight(T::WeightInfo::set_price())] + pub fn set_price( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + price: Option>, + whitelisted_buyer: Option>, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let whitelisted_buyer = whitelisted_buyer.map(T::Lookup::lookup).transpose()?; + Self::do_set_price(collection, item, origin, price, whitelisted_buyer) + } + + /// Allows to buy an item if it's up for sale. + /// + /// Origin must be Signed and must not be the owner of the `item`. + /// + /// - `collection`: The collection of the item. 
+ /// - `item`: The item the sender wants to buy. + /// - `bid_price`: The price the sender is willing to pay. + /// + /// Emits `ItemBought` on success. + #[pallet::weight(T::WeightInfo::buy_item())] + #[transactional] + pub fn buy_item( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + bid_price: ItemPrice, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_buy_item(collection, item, origin, bid_price) + } + } +} diff --git a/frame/nfts/src/migration.rs b/frame/nfts/src/migration.rs new file mode 100644 index 0000000000000..d301f0a3d1eb1 --- /dev/null +++ b/frame/nfts/src/migration.rs @@ -0,0 +1,57 @@ +// This file is part of Substrate. + +// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Various pieces of common functionality. +use super::*; +use frame_support::{ + traits::{Get, GetStorageVersion, PalletInfoAccess, StorageVersion}, + weights::Weight, +}; + +/// Migrate the pallet storage to v1. +pub fn migrate_to_v1, I: 'static, P: GetStorageVersion + PalletInfoAccess>( +) -> frame_support::weights::Weight { + let on_chain_storage_version =

::on_chain_storage_version(); + log::info!( + target: "runtime::uniques", + "Running migration storage v1 for uniques with storage version {:?}", + on_chain_storage_version, + ); + + if on_chain_storage_version < 1 { + let mut count = 0; + for (collection, detail) in Collection::::iter() { + CollectionAccount::::insert(&detail.owner, &collection, ()); + count += 1; + } + StorageVersion::new(1).put::

(); + log::info!( + target: "runtime::uniques", + "Running migration storage v1 for uniques with storage version {:?} was complete", + on_chain_storage_version, + ); + // calculate and return migration weights + T::DbWeight::get().reads_writes(count as Weight + 1, count as Weight + 1) + } else { + log::warn!( + target: "runtime::uniques", + "Attempted to apply migration to v1 but failed because storage version is {:?}", + on_chain_storage_version, + ); + T::DbWeight::get().reads(1) + } +} diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs new file mode 100644 index 0000000000000..ff7b791de4950 --- /dev/null +++ b/frame/nfts/src/mock.rs @@ -0,0 +1,114 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test environment for Uniques pallet. 
+ +use super::*; +use crate as pallet_uniques; + +use frame_support::{ + construct_runtime, + traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, +}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Uniques: pallet_uniques::{Pallet, Call, Storage, Event}, + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = ConstU64<250>; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type Balance = u64; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type MaxLocks = (); + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; +} + +impl Config for Test { + type Event = Event; + type CollectionId = u32; + type ItemId = u32; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type Locker = (); + type 
CollectionDeposit = ConstU64<2>; + type ItemDeposit = ConstU64<1>; + type MetadataDepositBase = ConstU64<1>; + type AttributeDepositBase = ConstU64<1>; + type DepositPerByte = ConstU64<1>; + type StringLimit = ConstU32<50>; + type KeyLimit = ConstU32<50>; + type ValueLimit = ConstU32<50>; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type Helper = (); +} + +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs new file mode 100644 index 0000000000000..8b1d00d7ba0c7 --- /dev/null +++ b/frame/nfts/src/tests.rs @@ -0,0 +1,872 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Uniques pallet. 
+ +use crate::{mock::*, Event, *}; +use frame_support::{assert_noop, assert_ok, dispatch::Dispatchable, traits::Currency}; +use pallet_balances::Error as BalancesError; +use sp_std::prelude::*; + +fn items() -> Vec<(u64, u32, u32)> { + let mut r: Vec<_> = Account::::iter().map(|x| x.0).collect(); + r.sort(); + let mut s: Vec<_> = Item::::iter().map(|x| (x.2.owner, x.0, x.1)).collect(); + s.sort(); + assert_eq!(r, s); + for collection in Item::::iter() + .map(|x| x.0) + .scan(None, |s, item| { + if s.map_or(false, |last| last == item) { + *s = Some(item); + Some(None) + } else { + Some(Some(item)) + } + }) + .flatten() + { + let details = Collection::::get(collection).unwrap(); + let items = Item::::iter_prefix(collection).count() as u32; + assert_eq!(details.items, items); + } + r +} + +fn collections() -> Vec<(u64, u32)> { + let mut r: Vec<_> = CollectionAccount::::iter().map(|x| (x.0, x.1)).collect(); + r.sort(); + let mut s: Vec<_> = Collection::::iter().map(|x| (x.1.owner, x.0)).collect(); + s.sort(); + assert_eq!(r, s); + r +} + +macro_rules! 
bvec { + ($( $x:tt )*) => { + vec![$( $x )*].try_into().unwrap() + } +} + +fn attributes(collection: u32) -> Vec<(Option, Vec, Vec)> { + let mut s: Vec<_> = Attribute::::iter_prefix((collection,)) + .map(|(k, v)| (k.0, k.1.into(), v.0.into())) + .collect(); + s.sort(); + s +} + +fn events() -> Vec> { + let result = System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let mock::Event::Uniques(inner) = e { Some(inner) } else { None }) + .collect::>(); + + System::reset_events(); + + result +} + +#[test] +fn basic_setup_works() { + new_test_ext().execute_with(|| { + assert_eq!(items(), vec![]); + }); +} + +#[test] +fn basic_minting_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_eq!(collections(), vec![(1, 0)]); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_eq!(items(), vec![(1, 0, 42)]); + + assert_ok!(Uniques::force_create(Origin::root(), 1, 2, true)); + assert_eq!(collections(), vec![(1, 0), (2, 1)]); + assert_ok!(Uniques::mint(Origin::signed(2), 1, 69, 1)); + assert_eq!(items(), vec![(1, 0, 42), (1, 1, 69)]); + }); +} + +#[test] +fn lifecycle_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + assert_eq!(Balances::reserved_balance(&1), 2); + assert_eq!(collections(), vec![(1, 0)]); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0, 0], false)); + assert_eq!(Balances::reserved_balance(&1), 5); + assert!(CollectionMetadataOf::::contains_key(0)); + + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 10)); + assert_eq!(Balances::reserved_balance(&1), 6); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 69, 20)); + assert_eq!(Balances::reserved_balance(&1), 7); + assert_eq!(items(), vec![(10, 0, 42), (20, 0, 69)]); + assert_eq!(Collection::::get(0).unwrap().items, 2); + assert_eq!(Collection::::get(0).unwrap().item_metadatas, 
0); + + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![42, 42], false)); + assert_eq!(Balances::reserved_balance(&1), 10); + assert!(ItemMetadataOf::::contains_key(0, 42)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![69, 69], false)); + assert_eq!(Balances::reserved_balance(&1), 13); + assert!(ItemMetadataOf::::contains_key(0, 69)); + + let w = Collection::::get(0).unwrap().destroy_witness(); + assert_eq!(w.items, 2); + assert_eq!(w.item_metadatas, 2); + assert_ok!(Uniques::destroy(Origin::signed(1), 0, w)); + assert_eq!(Balances::reserved_balance(&1), 0); + + assert!(!Collection::::contains_key(0)); + assert!(!Item::::contains_key(0, 42)); + assert!(!Item::::contains_key(0, 69)); + assert!(!CollectionMetadataOf::::contains_key(0)); + assert!(!ItemMetadataOf::::contains_key(0, 42)); + assert!(!ItemMetadataOf::::contains_key(0, 69)); + assert_eq!(collections(), vec![]); + assert_eq!(items(), vec![]); + }); +} + +#[test] +fn destroy_with_bad_witness_should_not_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + + let w = Collection::::get(0).unwrap().destroy_witness(); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_noop!(Uniques::destroy(Origin::signed(1), 0, w), Error::::BadWitness); + }); +} + +#[test] +fn mint_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_eq!(Uniques::owner(0, 42).unwrap(), 1); + assert_eq!(collections(), vec![(1, 0)]); + assert_eq!(items(), vec![(1, 0, 42)]); + }); +} + +#[test] +fn transfer_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + + assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 3)); + assert_eq!(items(), vec![(3, 0, 
42)]); + assert_noop!(Uniques::transfer(Origin::signed(2), 0, 42, 4), Error::::NoPermission); + + assert_ok!(Uniques::approve_transfer(Origin::signed(3), 0, 42, 2)); + assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 4)); + }); +} + +#[test] +fn freezing_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::freeze(Origin::signed(1), 0, 42)); + assert_noop!(Uniques::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); + + assert_ok!(Uniques::thaw(Origin::signed(1), 0, 42)); + assert_ok!(Uniques::freeze_collection(Origin::signed(1), 0)); + assert_noop!(Uniques::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); + + assert_ok!(Uniques::thaw_collection(Origin::signed(1), 0)); + assert_ok!(Uniques::transfer(Origin::signed(1), 0, 42, 2)); + }); +} + +#[test] +fn origin_guards_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + + Balances::make_free_balance_be(&2, 100); + assert_ok!(Uniques::set_accept_ownership(Origin::signed(2), Some(0))); + assert_noop!( + Uniques::transfer_ownership(Origin::signed(2), 0, 2), + Error::::NoPermission + ); + assert_noop!(Uniques::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); + assert_noop!(Uniques::freeze(Origin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!(Uniques::thaw(Origin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!(Uniques::mint(Origin::signed(2), 0, 69, 2), Error::::NoPermission); + assert_noop!(Uniques::burn(Origin::signed(2), 0, 42, None), Error::::NoPermission); + let w = Collection::::get(0).unwrap().destroy_witness(); + assert_noop!(Uniques::destroy(Origin::signed(2), 0, w), Error::::NoPermission); + }); +} + +#[test] +fn transfer_owner_should_work() { + new_test_ext().execute_with(|| { + 
Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + Balances::make_free_balance_be(&3, 100); + assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + assert_eq!(collections(), vec![(1, 0)]); + assert_noop!( + Uniques::transfer_ownership(Origin::signed(1), 0, 2), + Error::::Unaccepted + ); + assert_ok!(Uniques::set_accept_ownership(Origin::signed(2), Some(0))); + assert_ok!(Uniques::transfer_ownership(Origin::signed(1), 0, 2)); + + assert_eq!(collections(), vec![(2, 0)]); + assert_eq!(Balances::total_balance(&1), 98); + assert_eq!(Balances::total_balance(&2), 102); + assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(Balances::reserved_balance(&2), 2); + + assert_ok!(Uniques::set_accept_ownership(Origin::signed(1), Some(0))); + assert_noop!( + Uniques::transfer_ownership(Origin::signed(1), 0, 1), + Error::::NoPermission + ); + + // Mint and set metadata now and make sure that deposit gets transferred back. + assert_ok!(Uniques::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false)); + assert_ok!(Uniques::set_accept_ownership(Origin::signed(3), Some(0))); + assert_ok!(Uniques::transfer_ownership(Origin::signed(2), 0, 3)); + assert_eq!(collections(), vec![(3, 0)]); + assert_eq!(Balances::total_balance(&2), 57); + assert_eq!(Balances::total_balance(&3), 145); + assert_eq!(Balances::reserved_balance(&2), 0); + assert_eq!(Balances::reserved_balance(&3), 45); + + // 2's acceptence from before is reset when it became owner, so it cannot be transfered + // without a fresh acceptance. 
+ assert_noop!( + Uniques::transfer_ownership(Origin::signed(3), 0, 2), + Error::::Unaccepted + ); + }); +} + +#[test] +fn set_team_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); + + assert_ok!(Uniques::mint(Origin::signed(2), 0, 42, 2)); + assert_ok!(Uniques::freeze(Origin::signed(4), 0, 42)); + assert_ok!(Uniques::thaw(Origin::signed(3), 0, 42)); + assert_ok!(Uniques::transfer(Origin::signed(3), 0, 42, 3)); + assert_ok!(Uniques::burn(Origin::signed(3), 0, 42, None)); + }); +} + +#[test] +fn set_collection_metadata_should_work() { + new_test_ext().execute_with(|| { + // Cannot add metadata to unknown item + assert_noop!( + Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false), + Error::::UnknownCollection, + ); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + // Cannot add metadata to unowned item + assert_noop!( + Uniques::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false), + Error::::NoPermission, + ); + + // Successfully add metadata and take deposit + Balances::make_free_balance_be(&1, 30); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false)); + assert_eq!(Balances::free_balance(&1), 9); + assert!(CollectionMetadataOf::::contains_key(0)); + + // Force origin works, too. 
+ assert_ok!(Uniques::set_collection_metadata(Origin::root(), 0, bvec![0u8; 18], false)); + + // Update deposit + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], false)); + assert_eq!(Balances::free_balance(&1), 14); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 25], false)); + assert_eq!(Balances::free_balance(&1), 4); + + // Cannot over-reserve + assert_noop!( + Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 40], false), + BalancesError::::InsufficientBalance, + ); + + // Can't set or clear metadata once frozen + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], true)); + assert_noop!( + Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], false), + Error::::Frozen, + ); + assert_noop!( + Uniques::clear_collection_metadata(Origin::signed(1), 0), + Error::::Frozen + ); + + // Clear Metadata + assert_ok!(Uniques::set_collection_metadata(Origin::root(), 0, bvec![0u8; 15], false)); + assert_noop!( + Uniques::clear_collection_metadata(Origin::signed(2), 0), + Error::::NoPermission + ); + assert_noop!( + Uniques::clear_collection_metadata(Origin::signed(1), 1), + Error::::UnknownCollection + ); + assert_ok!(Uniques::clear_collection_metadata(Origin::signed(1), 0)); + assert!(!CollectionMetadataOf::::contains_key(0)); + }); +} + +#[test] +fn set_item_metadata_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 30); + + // Cannot add metadata to unknown item + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + // Cannot add metadata to unowned item + assert_noop!( + Uniques::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false), + Error::::NoPermission, + ); + + // Successfully add metadata and take deposit + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 20], false)); + 
assert_eq!(Balances::free_balance(&1), 8); + assert!(ItemMetadataOf::::contains_key(0, 42)); + + // Force origin works, too. + assert_ok!(Uniques::set_metadata(Origin::root(), 0, 42, bvec![0u8; 18], false)); + + // Update deposit + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false)); + assert_eq!(Balances::free_balance(&1), 13); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 25], false)); + assert_eq!(Balances::free_balance(&1), 3); + + // Cannot over-reserve + assert_noop!( + Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 40], false), + BalancesError::::InsufficientBalance, + ); + + // Can't set or clear metadata once frozen + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], true)); + assert_noop!( + Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false), + Error::::Frozen, + ); + assert_noop!(Uniques::clear_metadata(Origin::signed(1), 0, 42), Error::::Frozen); + + // Clear Metadata + assert_ok!(Uniques::set_metadata(Origin::root(), 0, 42, bvec![0u8; 15], false)); + assert_noop!( + Uniques::clear_metadata(Origin::signed(2), 0, 42), + Error::::NoPermission + ); + assert_noop!( + Uniques::clear_metadata(Origin::signed(1), 1, 42), + Error::::UnknownCollection + ); + assert_ok!(Uniques::clear_metadata(Origin::signed(1), 0, 42)); + assert!(!ItemMetadataOf::::contains_key(0, 42)); + }); +} + +#[test] +fn set_attribute_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![1], bvec![0])); + assert_eq!( + attributes(0), + vec![ + (None, bvec![0], bvec![0]), + (Some(0), bvec![0], bvec![0]), + (Some(0), bvec![1], 
bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(1), 9); + + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0; 10])); + assert_eq!( + attributes(0), + vec![ + (None, bvec![0], bvec![0; 10]), + (Some(0), bvec![0], bvec![0]), + (Some(0), bvec![1], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(1), 18); + + assert_ok!(Uniques::clear_attribute(Origin::signed(1), 0, Some(0), bvec![1])); + assert_eq!( + attributes(0), + vec![(None, bvec![0], bvec![0; 10]), (Some(0), bvec![0], bvec![0]),] + ); + assert_eq!(Balances::reserved_balance(1), 15); + + let w = Collection::::get(0).unwrap().destroy_witness(); + assert_ok!(Uniques::destroy(Origin::signed(1), 0, w)); + assert_eq!(attributes(0), vec![]); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn set_attribute_should_respect_freeze() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![0])); + assert_eq!( + attributes(0), + vec![ + (None, bvec![0], bvec![0]), + (Some(0), bvec![0], bvec![0]), + (Some(1), bvec![0], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(1), 9); + + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![], true)); + let e = Error::::Frozen; + assert_noop!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0]), e); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1])); + + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 0, bvec![], true)); + let e = Error::::Frozen; + assert_noop!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1]), e); + 
assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![1])); + }); +} + +#[test] +fn force_item_status_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 69, 2)); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); + assert_eq!(Balances::reserved_balance(1), 65); + + // force item status to be free holding + assert_ok!(Uniques::force_item_status(Origin::root(), 0, 1, 1, 1, 1, true, false)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 142, 1)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 169, 2)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 142, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 169, bvec![0; 20], false)); + assert_eq!(Balances::reserved_balance(1), 65); + + assert_ok!(Uniques::redeposit(Origin::signed(1), 0, bvec![0, 42, 50, 69, 100])); + assert_eq!(Balances::reserved_balance(1), 63); + + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); + assert_eq!(Balances::reserved_balance(1), 42); + + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); + assert_eq!(Balances::reserved_balance(1), 21); + + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn burn_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); + + assert_noop!( + 
Uniques::burn(Origin::signed(5), 0, 42, Some(5)), + Error::::UnknownCollection + ); + + assert_ok!(Uniques::mint(Origin::signed(2), 0, 42, 5)); + assert_ok!(Uniques::mint(Origin::signed(2), 0, 69, 5)); + assert_eq!(Balances::reserved_balance(1), 2); + + assert_noop!(Uniques::burn(Origin::signed(0), 0, 42, None), Error::::NoPermission); + assert_noop!(Uniques::burn(Origin::signed(5), 0, 42, Some(6)), Error::::WrongOwner); + + assert_ok!(Uniques::burn(Origin::signed(5), 0, 42, Some(5))); + assert_ok!(Uniques::burn(Origin::signed(3), 0, 69, Some(5))); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn approval_lifecycle_works() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::transfer(Origin::signed(3), 0, 42, 4)); + assert_noop!(Uniques::transfer(Origin::signed(3), 0, 42, 3), Error::::NoPermission); + assert!(Item::::get(0, 42).unwrap().approved.is_none()); + + assert_ok!(Uniques::approve_transfer(Origin::signed(4), 0, 42, 2)); + assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 2)); + }); +} + +#[test] +fn cancel_approval_works() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_noop!( + Uniques::cancel_approval(Origin::signed(2), 1, 42, None), + Error::::UnknownCollection + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(2), 0, 43, None), + Error::::UnknownCollection + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(3), 0, 42, None), + Error::::NoPermission + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(2), 0, 42, Some(4)), + Error::::WrongDelegate + ); + + assert_ok!(Uniques::cancel_approval(Origin::signed(2), 
0, 42, Some(3))); + assert_noop!( + Uniques::cancel_approval(Origin::signed(2), 0, 42, None), + Error::::NoDelegate + ); + }); +} + +#[test] +fn cancel_approval_works_with_admin() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_noop!( + Uniques::cancel_approval(Origin::signed(1), 1, 42, None), + Error::::UnknownCollection + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(1), 0, 43, None), + Error::::UnknownCollection + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(4)), + Error::::WrongDelegate + ); + + assert_ok!(Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(3))); + assert_noop!( + Uniques::cancel_approval(Origin::signed(1), 0, 42, None), + Error::::NoDelegate + ); + }); +} + +#[test] +fn cancel_approval_works_with_force() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_noop!( + Uniques::cancel_approval(Origin::root(), 1, 42, None), + Error::::UnknownCollection + ); + assert_noop!( + Uniques::cancel_approval(Origin::root(), 0, 43, None), + Error::::UnknownCollection + ); + assert_noop!( + Uniques::cancel_approval(Origin::root(), 0, 42, Some(4)), + Error::::WrongDelegate + ); + + assert_ok!(Uniques::cancel_approval(Origin::root(), 0, 42, Some(3))); + assert_noop!( + Uniques::cancel_approval(Origin::root(), 0, 42, None), + Error::::NoDelegate + ); + }); +} + +#[test] +fn max_supply_should_work() { + new_test_ext().execute_with(|| { + let collection_id = 0; + let user_id = 1; + let max_supply = 2; + + // validate set_collection_max_supply + assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_id, true)); + 
assert!(!CollectionMaxSupply::::contains_key(collection_id)); + + assert_ok!(Uniques::set_collection_max_supply( + Origin::signed(user_id), + collection_id, + max_supply + )); + assert_eq!(CollectionMaxSupply::::get(collection_id).unwrap(), max_supply); + + assert!(events().contains(&Event::::CollectionMaxSupplySet { + collection: collection_id, + max_supply, + })); + + assert_noop!( + Uniques::set_collection_max_supply( + Origin::signed(user_id), + collection_id, + max_supply + 1 + ), + Error::::MaxSupplyAlreadySet + ); + + // validate we can't mint more to max supply + assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, 0, user_id)); + assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, 1, user_id)); + assert_noop!( + Uniques::mint(Origin::signed(user_id), collection_id, 2, user_id), + Error::::MaxSupplyReached + ); + + // validate we remove the CollectionMaxSupply record when we destroy the collection + assert_ok!(Uniques::destroy( + Origin::signed(user_id), + collection_id, + Collection::::get(collection_id).unwrap().destroy_witness() + )); + assert!(!CollectionMaxSupply::::contains_key(collection_id)); + }); +} + +#[test] +fn set_price_should_work() { + new_test_ext().execute_with(|| { + let user_id = 1; + let collection_id = 0; + let item_1 = 1; + let item_2 = 2; + + assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_id, true)); + + assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_1, user_id)); + assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_2, user_id)); + + assert_ok!(Uniques::set_price( + Origin::signed(user_id), + collection_id, + item_1, + Some(1), + None, + )); + + assert_ok!(Uniques::set_price( + Origin::signed(user_id), + collection_id, + item_2, + Some(2), + Some(3) + )); + + let item = ItemPriceOf::::get(collection_id, item_1).unwrap(); + assert_eq!(item.0, 1); + assert_eq!(item.1, None); + + let item = ItemPriceOf::::get(collection_id, item_2).unwrap(); + 
assert_eq!(item.0, 2); + assert_eq!(item.1, Some(3)); + + assert!(events().contains(&Event::::ItemPriceSet { + collection: collection_id, + item: item_1, + price: 1, + whitelisted_buyer: None, + })); + + // validate we can unset the price + assert_ok!(Uniques::set_price(Origin::signed(user_id), collection_id, item_2, None, None)); + assert!(events().contains(&Event::::ItemPriceRemoved { + collection: collection_id, + item: item_2 + })); + assert!(!ItemPriceOf::::contains_key(collection_id, item_2)); + }); +} + +#[test] +fn buy_item_should_work() { + new_test_ext().execute_with(|| { + let user_1 = 1; + let user_2 = 2; + let user_3 = 3; + let collection_id = 0; + let item_1 = 1; + let item_2 = 2; + let item_3 = 3; + let price_1 = 20; + let price_2 = 30; + let initial_balance = 100; + + Balances::make_free_balance_be(&user_1, initial_balance); + Balances::make_free_balance_be(&user_2, initial_balance); + Balances::make_free_balance_be(&user_3, initial_balance); + + assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_1, true)); + + assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_1, user_1)); + assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_2, user_1)); + assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_3, user_1)); + + assert_ok!(Uniques::set_price( + Origin::signed(user_1), + collection_id, + item_1, + Some(price_1), + None, + )); + + assert_ok!(Uniques::set_price( + Origin::signed(user_1), + collection_id, + item_2, + Some(price_2), + Some(user_3), + )); + + // can't buy for less + assert_noop!( + Uniques::buy_item(Origin::signed(user_2), collection_id, item_1, 1), + Error::::BidTooLow + ); + + // pass the higher price to validate it will still deduct correctly + assert_ok!(Uniques::buy_item(Origin::signed(user_2), collection_id, item_1, price_1 + 1,)); + + // validate the new owner & balances + let item = Item::::get(collection_id, item_1).unwrap(); + assert_eq!(item.owner, user_2); + 
assert_eq!(Balances::total_balance(&user_1), initial_balance + price_1); + assert_eq!(Balances::total_balance(&user_2), initial_balance - price_1); + + // can't buy from yourself + assert_noop!( + Uniques::buy_item(Origin::signed(user_1), collection_id, item_2, price_2), + Error::::NoPermission + ); + + // can't buy when the item is listed for a specific buyer + assert_noop!( + Uniques::buy_item(Origin::signed(user_2), collection_id, item_2, price_2), + Error::::NoPermission + ); + + // can buy when I'm a whitelisted buyer + assert_ok!(Uniques::buy_item(Origin::signed(user_3), collection_id, item_2, price_2,)); + + assert!(events().contains(&Event::::ItemBought { + collection: collection_id, + item: item_2, + price: price_2, + seller: user_1, + buyer: user_3, + })); + + // ensure we reset the buyer field + assert!(!ItemPriceOf::::contains_key(collection_id, item_2)); + + // can't buy when item is not for sale + assert_noop!( + Uniques::buy_item(Origin::signed(user_2), collection_id, item_3, price_2), + Error::::NotForSale + ); + + // ensure we can't buy an item when the collection or an item is frozen + { + assert_ok!(Uniques::set_price( + Origin::signed(user_1), + collection_id, + item_3, + Some(price_1), + None, + )); + + // freeze collection + assert_ok!(Uniques::freeze_collection(Origin::signed(user_1), collection_id)); + + let buy_item_call = mock::Call::Uniques(crate::Call::::buy_item { + collection: collection_id, + item: item_3, + bid_price: price_1, + }); + assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::::Frozen); + + assert_ok!(Uniques::thaw_collection(Origin::signed(user_1), collection_id)); + + // freeze item + assert_ok!(Uniques::freeze(Origin::signed(user_1), collection_id, item_3)); + + let buy_item_call = mock::Call::Uniques(crate::Call::::buy_item { + collection: collection_id, + item: item_3, + bid_price: price_1, + }); + assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::::Frozen); + } + }); +} diff --git 
a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs new file mode 100644 index 0000000000000..98e056163d28d --- /dev/null +++ b/frame/nfts/src/types.rs @@ -0,0 +1,129 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Various basic types for use in the Uniques pallet. + +use super::*; +use frame_support::{ + pallet_prelude::{BoundedVec, MaxEncodedLen}, + traits::Get, +}; +use scale_info::TypeInfo; + +pub(super) type DepositBalanceOf = + <>::Currency as Currency<::AccountId>>::Balance; +pub(super) type CollectionDetailsFor = + CollectionDetails<::AccountId, DepositBalanceOf>; +pub(super) type ItemDetailsFor = + ItemDetails<::AccountId, DepositBalanceOf>; +pub(super) type ItemPrice = + <>::Currency as Currency<::AccountId>>::Balance; + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct CollectionDetails { + /// Can change `owner`, `issuer`, `freezer` and `admin` accounts. + pub(super) owner: AccountId, + /// Can mint tokens. + pub(super) issuer: AccountId, + /// Can thaw tokens, force transfers and burn tokens from any account. + pub(super) admin: AccountId, + /// Can freeze tokens. + pub(super) freezer: AccountId, + /// The total balance deposited for the all storage associated with this collection. + /// Used by `destroy`. 
+ pub(super) total_deposit: DepositBalance, + /// If `true`, then no deposit is needed to hold items of this collection. + pub(super) free_holding: bool, + /// The total number of outstanding items of this collection. + pub(super) items: u32, + /// The total number of outstanding item metadata of this collection. + pub(super) item_metadatas: u32, + /// The total number of attributes for this collection. + pub(super) attributes: u32, + /// Whether the collection is frozen for non-admin transfers. + pub(super) is_frozen: bool, +} + +/// Witness data for the destroy transactions. +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct DestroyWitness { + /// The total number of outstanding items of this collection. + #[codec(compact)] + pub items: u32, + /// The total number of items in this collection that have outstanding item metadata. + #[codec(compact)] + pub item_metadatas: u32, + #[codec(compact)] + /// The total number of attributes for this collection. + pub attributes: u32, +} + +impl CollectionDetails { + pub fn destroy_witness(&self) -> DestroyWitness { + DestroyWitness { + items: self.items, + item_metadatas: self.item_metadatas, + attributes: self.attributes, + } + } +} + +/// Information concerning the ownership of a single unique item. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] +pub struct ItemDetails { + /// The owner of this item. + pub(super) owner: AccountId, + /// The approved transferrer of this item, if one is set. + pub(super) approved: Option, + /// Whether the item can be transferred or not. + pub(super) is_frozen: bool, + /// The amount held in the pallet's default account for this item. Free-hold items will have + /// this as zero. 
+ pub(super) deposit: DepositBalance, +} + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] +#[scale_info(skip_type_params(StringLimit))] +#[codec(mel_bound(DepositBalance: MaxEncodedLen))] +pub struct CollectionMetadata> { + /// The balance deposited for this metadata. + /// + /// This pays for the data stored in this struct. + pub(super) deposit: DepositBalance, + /// General information concerning this collection. Limited in length by `StringLimit`. This + /// will generally be either a JSON dump or the hash of some JSON which can be found on a + /// hash-addressable global publication system such as IPFS. + pub(super) data: BoundedVec, + /// Whether the collection's metadata may be changed by a non Force origin. + pub(super) is_frozen: bool, +} + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] +#[scale_info(skip_type_params(StringLimit))] +#[codec(mel_bound(DepositBalance: MaxEncodedLen))] +pub struct ItemMetadata> { + /// The balance deposited for this metadata. + /// + /// This pays for the data stored in this struct. + pub(super) deposit: DepositBalance, + /// General information concerning this item. Limited in length by `StringLimit`. This will + /// generally be either a JSON dump or the hash of some JSON which can be found on a + /// hash-addressable global publication system such as IPFS. + pub(super) data: BoundedVec, + /// Whether the item metadata may be changed by a non Force origin. + pub(super) is_frozen: bool, +} diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs new file mode 100644 index 0000000000000..7c8cb170b1b1d --- /dev/null +++ b/frame/nfts/src/weights.rs @@ -0,0 +1,510 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_uniques +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2022-07-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `test-bench-bot`, CPU: `Intel(R) Xeon(R) CPU @ 3.10GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// target/production/substrate +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --pallet=pallet_uniques +// --chain=dev +// --output=./frame/uniques/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_uniques. 
+pub trait WeightInfo { + fn create() -> Weight; + fn force_create() -> Weight; + fn destroy(n: u32, m: u32, a: u32, ) -> Weight; + fn mint() -> Weight; + fn burn() -> Weight; + fn transfer() -> Weight; + fn redeposit(i: u32, ) -> Weight; + fn freeze() -> Weight; + fn thaw() -> Weight; + fn freeze_collection() -> Weight; + fn thaw_collection() -> Weight; + fn transfer_ownership() -> Weight; + fn set_team() -> Weight; + fn force_item_status() -> Weight; + fn set_attribute() -> Weight; + fn clear_attribute() -> Weight; + fn set_metadata() -> Weight; + fn clear_metadata() -> Weight; + fn set_collection_metadata() -> Weight; + fn clear_collection_metadata() -> Weight; + fn approve_transfer() -> Weight; + fn cancel_approval() -> Weight; + fn set_accept_ownership() -> Weight; + fn set_collection_max_supply() -> Weight; + fn set_price() -> Weight; + fn buy_item() -> Weight; +} + +/// Weights for pallet_uniques using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques ClassAccount (r:0 w:1) + fn create() -> Weight { + (33_075_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques ClassAccount (r:0 w:1) + fn force_create() -> Weight { + (19_528_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Asset (r:1 w:0) + // Storage: Uniques ClassAccount (r:0 w:1) + // Storage: Uniques Attribute (r:0 w:1000) + // Storage: Uniques ClassMetadataOf (r:0 w:1) + // Storage: Uniques InstanceMetadataOf (r:0 w:1000) + // Storage: Uniques CollectionMaxSupply (r:0 w:1) + // Storage: Uniques Account (r:0 w:20) + /// The range of component `n` is `[0, 1000]`. 
+ /// The range of component `m` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy(n: u32, m: u32, a: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 25_000 + .saturating_add((13_639_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 25_000 + .saturating_add((2_393_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 25_000 + .saturating_add((2_217_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(m as Weight))) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques CollectionMaxSupply (r:1 w:0) + // Storage: Uniques Account (r:0 w:1) + fn mint() -> Weight { + (42_146_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Account (r:0 w:1) + // Storage: Uniques ItemPriceOf (r:0 w:1) + fn burn() -> Weight { + (42_960_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Account (r:0 w:2) + // Storage: Uniques ItemPriceOf (r:0 w:1) + fn transfer() -> Weight { + (33_025_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Asset (r:100 
w:100) + /// The range of component `i` is `[0, 5000]`. + fn redeposit(i: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 24_000 + .saturating_add((15_540_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn freeze() -> Weight { + (25_194_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn thaw() -> Weight { + (25_397_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn freeze_collection() -> Weight { + (19_278_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn thaw_collection() -> Weight { + (19_304_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques OwnershipAcceptance (r:1 w:1) + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques ClassAccount (r:0 w:2) + fn transfer_ownership() -> Weight { + (28_615_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn set_team() -> Weight { + (19_943_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // 
Storage: Uniques ClassAccount (r:0 w:1) + fn force_item_status() -> Weight { + (22_583_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:0) + // Storage: Uniques Attribute (r:1 w:1) + fn set_attribute() -> Weight { + (47_520_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:0) + // Storage: Uniques Attribute (r:1 w:1) + fn clear_attribute() -> Weight { + (45_316_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:1) + fn set_metadata() -> Weight { + (38_391_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:1) + fn clear_metadata() -> Weight { + (38_023_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques ClassMetadataOf (r:1 w:1) + fn set_collection_metadata() -> Weight { + (37_398_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques ClassMetadataOf (r:1 w:1) + fn clear_collection_metadata() -> Weight { + (35_621_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Asset (r:1 w:1) + fn 
approve_transfer() -> Weight { + (25_856_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Asset (r:1 w:1) + fn cancel_approval() -> Weight { + (26_098_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques OwnershipAcceptance (r:1 w:1) + fn set_accept_ownership() -> Weight { + (24_076_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques CollectionMaxSupply (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn set_collection_max_supply() -> Weight { + (22_035_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Asset (r:1 w:0) + // Storage: Uniques ItemPriceOf (r:0 w:1) + fn set_price() -> Weight { + (22_534_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques ItemPriceOf (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Account (r:0 w:2) + fn buy_item() -> Weight { + (45_272_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques ClassAccount (r:0 w:1) + fn create() -> Weight { + (33_075_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques ClassAccount (r:0 w:1) + fn force_create() -> Weight { + (19_528_000 as 
Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Asset (r:1 w:0) + // Storage: Uniques ClassAccount (r:0 w:1) + // Storage: Uniques Attribute (r:0 w:1000) + // Storage: Uniques ClassMetadataOf (r:0 w:1) + // Storage: Uniques InstanceMetadataOf (r:0 w:1000) + // Storage: Uniques CollectionMaxSupply (r:0 w:1) + // Storage: Uniques Account (r:0 w:20) + /// The range of component `n` is `[0, 1000]`. + /// The range of component `m` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy(n: u32, m: u32, a: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 25_000 + .saturating_add((13_639_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 25_000 + .saturating_add((2_393_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 25_000 + .saturating_add((2_217_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(m as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques CollectionMaxSupply (r:1 w:0) + // Storage: Uniques Account (r:0 w:1) + fn mint() -> Weight { + (42_146_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Account (r:0 w:1) + // Storage: Uniques ItemPriceOf (r:0 w:1) + fn burn() -> 
Weight { + (42_960_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Account (r:0 w:2) + // Storage: Uniques ItemPriceOf (r:0 w:1) + fn transfer() -> Weight { + (33_025_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Asset (r:100 w:100) + /// The range of component `i` is `[0, 5000]`. + fn redeposit(i: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 24_000 + .saturating_add((15_540_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn freeze() -> Weight { + (25_194_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn thaw() -> Weight { + (25_397_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn freeze_collection() -> Weight { + (19_278_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn thaw_collection() -> Weight { + (19_304_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques OwnershipAcceptance (r:1 w:1) + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques ClassAccount (r:0 w:2) + fn transfer_ownership() -> Weight { + (28_615_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn set_team() -> Weight { + (19_943_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques ClassAccount (r:0 w:1) + fn force_item_status() -> Weight { + (22_583_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:0) + // Storage: Uniques Attribute (r:1 w:1) + fn set_attribute() -> Weight { + (47_520_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:0) + // Storage: Uniques Attribute (r:1 w:1) + fn clear_attribute() -> Weight { + (45_316_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:1) + fn set_metadata() -> Weight { + (38_391_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:1) + fn clear_metadata() -> Weight { + (38_023_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + 
.saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques ClassMetadataOf (r:1 w:1) + fn set_collection_metadata() -> Weight { + (37_398_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques ClassMetadataOf (r:1 w:1) + fn clear_collection_metadata() -> Weight { + (35_621_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Asset (r:1 w:1) + fn approve_transfer() -> Weight { + (25_856_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Asset (r:1 w:1) + fn cancel_approval() -> Weight { + (26_098_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques OwnershipAcceptance (r:1 w:1) + fn set_accept_ownership() -> Weight { + (24_076_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques CollectionMaxSupply (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn set_collection_max_supply() -> Weight { + (22_035_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Asset (r:1 w:0) + // Storage: Uniques ItemPriceOf (r:0 w:1) + fn set_price() -> Weight { + (22_534_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques ItemPriceOf (r:1 
w:1) + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Account (r:0 w:2) + fn buy_item() -> Weight { + (45_272_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } +} From af8875d42ac45176a70a9b252b6334a6b62d2337 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 29 Aug 2022 18:44:35 +0300 Subject: [PATCH 002/101] Connect new pallet --- Cargo.lock | 18 ++ Cargo.toml | 1 + bin/node/runtime/Cargo.toml | 4 + bin/node/runtime/src/lib.rs | 25 +- frame/nfts/Cargo.toml | 4 +- frame/nfts/src/benchmarking.rs | 28 +-- frame/nfts/src/lib.rs | 1 - frame/nfts/src/migration.rs | 57 ----- frame/nfts/src/mock.rs | 6 +- frame/nfts/src/tests.rs | 421 ++++++++++++++++----------------- frame/nfts/src/types.rs | 2 +- frame/nfts/src/weights.rs | 10 +- 12 files changed, 270 insertions(+), 307 deletions(-) delete mode 100644 frame/nfts/src/migration.rs diff --git a/Cargo.lock b/Cargo.lock index 64e84fe4710af..945f547fcc0ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3375,6 +3375,7 @@ dependencies = [ "pallet-membership", "pallet-mmr", "pallet-multisig", + "pallet-nfts", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", @@ -5877,6 +5878,23 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-nfts" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-nicks" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index e2907716ca9f2..dfa33c3edc75e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -113,6 +113,7 @@ members = [ "frame/offences", "frame/preimage", "frame/proxy", + "frame/nfts", "frame/nomination-pools", "frame/nomination-pools/benchmarking", "frame/nomination-pools/test-staking", diff --git 
a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 10b15b6ec554d..e9cc7294bd18c 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -77,6 +77,7 @@ pallet-lottery = { version = "4.0.0-dev", default-features = false, path = "../. pallet-membership = { version = "4.0.0-dev", default-features = false, path = "../../../frame/membership" } pallet-mmr = { version = "4.0.0-dev", default-features = false, path = "../../../frame/merkle-mountain-range" } pallet-multisig = { version = "4.0.0-dev", default-features = false, path = "../../../frame/multisig" } +pallet-nfts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/nfts" } pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../../../frame/nomination-pools"} pallet-nomination-pools-benchmarking = { version = "1.0.0", default-features = false, optional = true, path = "../../../frame/nomination-pools/benchmarking" } pallet-nomination-pools-runtime-api = { version = "1.0.0-dev", default-features = false, path = "../../../frame/nomination-pools/runtime-api" } @@ -185,6 +186,7 @@ std = [ "pallet-remark/std", "pallet-recovery/std", "pallet-uniques/std", + "pallet-nfts/std", "pallet-vesting/std", "log/std", "frame-try-runtime/std", @@ -239,6 +241,7 @@ runtime-benchmarks = [ "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", + "pallet-nfts/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-whitelist/runtime-benchmarks", "frame-system-benchmarking", @@ -289,6 +292,7 @@ try-runtime = [ "pallet-transaction-payment/try-runtime", "pallet-treasury/try-runtime", "pallet-uniques/try-runtime", + "pallet-nfts/try-runtime", "pallet-utility/try-runtime", "pallet-vesting/try-runtime", "pallet-whitelist/try-runtime", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 0816fedff0347..28cc2452039ac 100644 --- a/bin/node/runtime/src/lib.rs +++ 
b/bin/node/runtime/src/lib.rs @@ -294,7 +294,7 @@ impl InstanceFilter for ProxyType { c, Call::Balances(..) | Call::Assets(..) | Call::Uniques(..) | - Call::Vesting(pallet_vesting::Call::vested_transfer { .. }) | + Call::Nfts(..) | Call::Vesting(pallet_vesting::Call::vested_transfer { .. }) | Call::Indices(pallet_indices::Call::transfer { .. }) ), ProxyType::Governance => matches!( @@ -1476,6 +1476,27 @@ impl pallet_uniques::Config for Runtime { type Locker = (); } +impl pallet_nfts::Config for Runtime { + type Event = Event; + type CollectionId = u32; + type ItemId = u32; + type Currency = Balances; + type ForceOrigin = frame_system::EnsureRoot; + type CollectionDeposit = CollectionDeposit; + type ItemDeposit = ItemDeposit; + type MetadataDepositBase = MetadataDepositBase; + type AttributeDepositBase = MetadataDepositBase; + type DepositPerByte = MetadataDepositPerByte; + type StringLimit = StringLimit; + type KeyLimit = KeyLimit; + type ValueLimit = ValueLimit; + type WeightInfo = pallet_nfts::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type Helper = (); + type CreateOrigin = AsEnsureOriginWithArg>; + type Locker = (); +} + impl pallet_transaction_storage::Config for Runtime { type Event = Event; type Currency = Balances; @@ -1628,6 +1649,7 @@ construct_runtime!( Lottery: pallet_lottery, Gilt: pallet_gilt, Uniques: pallet_uniques, + Nfts: pallet_nfts, TransactionStorage: pallet_transaction_storage, BagsList: pallet_bags_list, StateTrieMigration: pallet_state_trie_migration, @@ -1743,6 +1765,7 @@ mod benches { [pallet_transaction_storage, TransactionStorage] [pallet_treasury, Treasury] [pallet_uniques, Uniques] + [pallet_nfts, Nfts] [pallet_utility, Utility] [pallet_vesting, Vesting] [pallet_whitelist, Whitelist] diff --git a/frame/nfts/Cargo.toml b/frame/nfts/Cargo.toml index 19b0790947f84..7f1ce4ff416b0 100644 --- a/frame/nfts/Cargo.toml +++ b/frame/nfts/Cargo.toml @@ -1,12 +1,12 @@ [package] -name = "pallet-uniques" +name = 
"pallet-nfts" version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" -description = "FRAME NFT asset management pallet" +description = "FRAME NFTs pallet" readme = "README.md" [package.metadata.docs.rs] diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index 3e3148b5b5fc2..e30b5ebbd3fe5 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Uniques pallet benchmarking. +//! Nfts pallet benchmarking. #![cfg(feature = "runtime-benchmarks")] @@ -32,7 +32,7 @@ use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::Bounded; use sp_std::prelude::*; -use crate::Pallet as Uniques; +use crate::Pallet as Nfts; const SEED: u32 = 0; @@ -42,7 +42,7 @@ fn create_collection, I: 'static>( let caller_lookup = T::Lookup::unlookup(caller.clone()); let collection = T::Helper::collection(0); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - assert!(Uniques::::force_create( + assert!(Nfts::::force_create( SystemOrigin::Root.into(), collection, caller_lookup.clone(), @@ -58,7 +58,7 @@ fn add_collection_metadata, I: 'static>() -> (T::AccountId, Account whitelist_account!(caller); } let caller_lookup = T::Lookup::unlookup(caller.clone()); - assert!(Uniques::::set_collection_metadata( + assert!(Nfts::::set_collection_metadata( SystemOrigin::Signed(caller.clone()).into(), T::Helper::collection(0), vec![0; T::StringLimit::get() as usize].try_into().unwrap(), @@ -77,7 +77,7 @@ fn mint_item, I: 'static>( } let caller_lookup = T::Lookup::unlookup(caller.clone()); let item = T::Helper::item(index); - assert!(Uniques::::mint( + assert!(Nfts::::mint( SystemOrigin::Signed(caller.clone()).into(), T::Helper::collection(0), item, @@ -95,7 +95,7 @@ fn 
add_item_metadata, I: 'static>( whitelist_account!(caller); } let caller_lookup = T::Lookup::unlookup(caller.clone()); - assert!(Uniques::::set_metadata( + assert!(Nfts::::set_metadata( SystemOrigin::Signed(caller.clone()).into(), T::Helper::collection(0), item, @@ -115,7 +115,7 @@ fn add_item_attribute, I: 'static>( } let caller_lookup = T::Lookup::unlookup(caller.clone()); let key: BoundedVec<_, _> = vec![0; T::KeyLimit::get() as usize].try_into().unwrap(); - assert!(Uniques::::set_attribute( + assert!(Nfts::::set_attribute( SystemOrigin::Signed(caller.clone()).into(), T::Helper::collection(0), Some(item), @@ -209,7 +209,7 @@ benchmarks_instance_pallet! { let i in 0 .. 5_000; let (collection, caller, caller_lookup) = create_collection::(); let items = (0..i).map(|x| mint_item::(x as u16).0).collect::>(); - Uniques::::force_item_status( + Nfts::::force_item_status( SystemOrigin::Root.into(), collection, caller_lookup.clone(), @@ -235,7 +235,7 @@ benchmarks_instance_pallet! { thaw { let (collection, caller, caller_lookup) = create_collection::(); let (item, ..) = mint_item::(0); - Uniques::::freeze( + Nfts::::freeze( SystemOrigin::Signed(caller.clone()).into(), collection, item, @@ -255,7 +255,7 @@ benchmarks_instance_pallet! { thaw_collection { let (collection, caller, caller_lookup) = create_collection::(); let origin = SystemOrigin::Signed(caller.clone()).into(); - Uniques::::freeze_collection(origin, collection)?; + Nfts::::freeze_collection(origin, collection)?; }: _(SystemOrigin::Signed(caller.clone()), collection) verify { assert_last_event::(Event::CollectionThawed { collection }.into()); @@ -267,7 +267,7 @@ benchmarks_instance_pallet! 
{ let target_lookup = T::Lookup::unlookup(target.clone()); T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); let origin = SystemOrigin::Signed(target.clone()).into(); - Uniques::::set_accept_ownership(origin, Some(collection))?; + Nfts::::set_accept_ownership(origin, Some(collection))?; }: _(SystemOrigin::Signed(caller), collection, target_lookup) verify { assert_last_event::(Event::OwnerChanged { collection, new_owner: target }.into()); @@ -379,7 +379,7 @@ benchmarks_instance_pallet! { let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let origin = SystemOrigin::Signed(caller.clone()).into(); - Uniques::::approve_transfer(origin, collection, item, delegate_lookup.clone())?; + Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone())?; }: _(SystemOrigin::Signed(caller.clone()), collection, item, Some(delegate_lookup)) verify { assert_last_event::(Event::ApprovalCancelled { collection, item, owner: caller, delegate }.into()); @@ -430,7 +430,7 @@ benchmarks_instance_pallet! { let buyer_lookup = T::Lookup::unlookup(buyer.clone()); let price = ItemPrice::::from(0u32); let origin = SystemOrigin::Signed(seller.clone()).into(); - Uniques::::set_price(origin, collection, item, Some(price.clone()), Some(buyer_lookup))?; + Nfts::::set_price(origin, collection, item, Some(price.clone()), Some(buyer_lookup))?; T::Currency::make_free_balance_be(&buyer, DepositBalanceOf::::max_value()); }: _(SystemOrigin::Signed(buyer.clone()), collection, item, price.clone()) verify { @@ -443,5 +443,5 @@ benchmarks_instance_pallet! 
{ }.into()); } - impl_benchmark_test_suite!(Uniques, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Nfts, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 70f10ca4f8b39..cb96e8138ba5e 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -39,7 +39,6 @@ mod functions; mod impl_nonfungibles; mod types; -pub mod migration; pub mod weights; use codec::{Decode, Encode}; diff --git a/frame/nfts/src/migration.rs b/frame/nfts/src/migration.rs deleted file mode 100644 index d301f0a3d1eb1..0000000000000 --- a/frame/nfts/src/migration.rs +++ /dev/null @@ -1,57 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Various pieces of common functionality. -use super::*; -use frame_support::{ - traits::{Get, GetStorageVersion, PalletInfoAccess, StorageVersion}, - weights::Weight, -}; - -/// Migrate the pallet storage to v1. -pub fn migrate_to_v1, I: 'static, P: GetStorageVersion + PalletInfoAccess>( -) -> frame_support::weights::Weight { - let on_chain_storage_version =

::on_chain_storage_version(); - log::info!( - target: "runtime::uniques", - "Running migration storage v1 for uniques with storage version {:?}", - on_chain_storage_version, - ); - - if on_chain_storage_version < 1 { - let mut count = 0; - for (collection, detail) in Collection::::iter() { - CollectionAccount::::insert(&detail.owner, &collection, ()); - count += 1; - } - StorageVersion::new(1).put::

(); - log::info!( - target: "runtime::uniques", - "Running migration storage v1 for uniques with storage version {:?} was complete", - on_chain_storage_version, - ); - // calculate and return migration weights - T::DbWeight::get().reads_writes(count as Weight + 1, count as Weight + 1) - } else { - log::warn!( - target: "runtime::uniques", - "Attempted to apply migration to v1 but failed because storage version is {:?}", - on_chain_storage_version, - ); - T::DbWeight::get().reads(1) - } -} diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs index ff7b791de4950..f3040faac5f40 100644 --- a/frame/nfts/src/mock.rs +++ b/frame/nfts/src/mock.rs @@ -15,10 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Test environment for Uniques pallet. +//! Test environment for Nfts pallet. use super::*; -use crate as pallet_uniques; +use crate as pallet_nfts; use frame_support::{ construct_runtime, @@ -41,7 +41,7 @@ construct_runtime!( { System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - Uniques: pallet_uniques::{Pallet, Call, Storage, Event}, + Nfts: pallet_nfts::{Pallet, Call, Storage, Event}, } ); diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 8b1d00d7ba0c7..2b20d124bd9ae 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Tests for Uniques pallet. +//! Tests for Nfts pallet. 
use crate::{mock::*, Event, *}; use frame_support::{assert_noop, assert_ok, dispatch::Dispatchable, traits::Currency}; @@ -74,7 +74,7 @@ fn events() -> Vec> { let result = System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let mock::Event::Uniques(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let mock::Event::Nfts(inner) = e { Some(inner) } else { None }) .collect::>(); System::reset_events(); @@ -92,14 +92,14 @@ fn basic_setup_works() { #[test] fn basic_minting_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); assert_eq!(collections(), vec![(1, 0)]); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); assert_eq!(items(), vec![(1, 0, 42)]); - assert_ok!(Uniques::force_create(Origin::root(), 1, 2, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, 2, true)); assert_eq!(collections(), vec![(1, 0), (2, 1)]); - assert_ok!(Uniques::mint(Origin::signed(2), 1, 69, 1)); + assert_ok!(Nfts::mint(Origin::signed(2), 1, 69, 1)); assert_eq!(items(), vec![(1, 0, 42), (1, 1, 69)]); }); } @@ -108,32 +108,32 @@ fn basic_minting_should_work() { fn lifecycle_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + assert_ok!(Nfts::create(Origin::signed(1), 0, 1)); assert_eq!(Balances::reserved_balance(&1), 2); assert_eq!(collections(), vec![(1, 0)]); - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0, 0], false)); + assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0, 0], false)); assert_eq!(Balances::reserved_balance(&1), 5); assert!(CollectionMetadataOf::::contains_key(0)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 10)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 10)); 
assert_eq!(Balances::reserved_balance(&1), 6); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 69, 20)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 69, 20)); assert_eq!(Balances::reserved_balance(&1), 7); assert_eq!(items(), vec![(10, 0, 42), (20, 0, 69)]); assert_eq!(Collection::::get(0).unwrap().items, 2); assert_eq!(Collection::::get(0).unwrap().item_metadatas, 0); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![42, 42], false)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![42, 42], false)); assert_eq!(Balances::reserved_balance(&1), 10); assert!(ItemMetadataOf::::contains_key(0, 42)); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![69, 69], false)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 69, bvec![69, 69], false)); assert_eq!(Balances::reserved_balance(&1), 13); assert!(ItemMetadataOf::::contains_key(0, 69)); let w = Collection::::get(0).unwrap().destroy_witness(); assert_eq!(w.items, 2); assert_eq!(w.item_metadatas, 2); - assert_ok!(Uniques::destroy(Origin::signed(1), 0, w)); + assert_ok!(Nfts::destroy(Origin::signed(1), 0, w)); assert_eq!(Balances::reserved_balance(&1), 0); assert!(!Collection::::contains_key(0)); @@ -151,20 +151,20 @@ fn lifecycle_should_work() { fn destroy_with_bad_witness_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + assert_ok!(Nfts::create(Origin::signed(1), 0, 1)); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); - assert_noop!(Uniques::destroy(Origin::signed(1), 0, w), Error::::BadWitness); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); + assert_noop!(Nfts::destroy(Origin::signed(1), 0, w), Error::::BadWitness); }); } #[test] fn mint_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - 
assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); - assert_eq!(Uniques::owner(0, 42).unwrap(), 1); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); + assert_eq!(Nfts::owner(0, 42).unwrap(), 1); assert_eq!(collections(), vec![(1, 0)]); assert_eq!(items(), vec![(1, 0, 42)]); }); @@ -173,54 +173,54 @@ fn mint_should_work() { #[test] fn transfer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Nfts::transfer(Origin::signed(2), 0, 42, 3)); assert_eq!(items(), vec![(3, 0, 42)]); - assert_noop!(Uniques::transfer(Origin::signed(2), 0, 42, 4), Error::::NoPermission); + assert_noop!(Nfts::transfer(Origin::signed(2), 0, 42, 4), Error::::NoPermission); - assert_ok!(Uniques::approve_transfer(Origin::signed(3), 0, 42, 2)); - assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 4)); + assert_ok!(Nfts::approve_transfer(Origin::signed(3), 0, 42, 2)); + assert_ok!(Nfts::transfer(Origin::signed(2), 0, 42, 4)); }); } #[test] fn freezing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); - assert_ok!(Uniques::freeze(Origin::signed(1), 0, 42)); - assert_noop!(Uniques::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::freeze(Origin::signed(1), 0, 42)); + assert_noop!(Nfts::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); - assert_ok!(Uniques::thaw(Origin::signed(1), 0, 42)); - assert_ok!(Uniques::freeze_collection(Origin::signed(1), 0)); 
- assert_noop!(Uniques::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); + assert_ok!(Nfts::thaw(Origin::signed(1), 0, 42)); + assert_ok!(Nfts::freeze_collection(Origin::signed(1), 0)); + assert_noop!(Nfts::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); - assert_ok!(Uniques::thaw_collection(Origin::signed(1), 0)); - assert_ok!(Uniques::transfer(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::thaw_collection(Origin::signed(1), 0)); + assert_ok!(Nfts::transfer(Origin::signed(1), 0, 42, 2)); }); } #[test] fn origin_guards_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); Balances::make_free_balance_be(&2, 100); - assert_ok!(Uniques::set_accept_ownership(Origin::signed(2), Some(0))); + assert_ok!(Nfts::set_accept_ownership(Origin::signed(2), Some(0))); assert_noop!( - Uniques::transfer_ownership(Origin::signed(2), 0, 2), + Nfts::transfer_ownership(Origin::signed(2), 0, 2), Error::::NoPermission ); - assert_noop!(Uniques::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); - assert_noop!(Uniques::freeze(Origin::signed(2), 0, 42), Error::::NoPermission); - assert_noop!(Uniques::thaw(Origin::signed(2), 0, 42), Error::::NoPermission); - assert_noop!(Uniques::mint(Origin::signed(2), 0, 69, 2), Error::::NoPermission); - assert_noop!(Uniques::burn(Origin::signed(2), 0, 42, None), Error::::NoPermission); + assert_noop!(Nfts::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); + assert_noop!(Nfts::freeze(Origin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!(Nfts::thaw(Origin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!(Nfts::mint(Origin::signed(2), 0, 69, 2), Error::::NoPermission); + assert_noop!(Nfts::burn(Origin::signed(2), 0, 42, None), Error::::NoPermission); let w = 
Collection::::get(0).unwrap().destroy_witness(); - assert_noop!(Uniques::destroy(Origin::signed(2), 0, w), Error::::NoPermission); + assert_noop!(Nfts::destroy(Origin::signed(2), 0, w), Error::::NoPermission); }); } @@ -230,14 +230,11 @@ fn transfer_owner_should_work() { Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); Balances::make_free_balance_be(&3, 100); - assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + assert_ok!(Nfts::create(Origin::signed(1), 0, 1)); assert_eq!(collections(), vec![(1, 0)]); - assert_noop!( - Uniques::transfer_ownership(Origin::signed(1), 0, 2), - Error::::Unaccepted - ); - assert_ok!(Uniques::set_accept_ownership(Origin::signed(2), Some(0))); - assert_ok!(Uniques::transfer_ownership(Origin::signed(1), 0, 2)); + assert_noop!(Nfts::transfer_ownership(Origin::signed(1), 0, 2), Error::::Unaccepted); + assert_ok!(Nfts::set_accept_ownership(Origin::signed(2), Some(0))); + assert_ok!(Nfts::transfer_ownership(Origin::signed(1), 0, 2)); assert_eq!(collections(), vec![(2, 0)]); assert_eq!(Balances::total_balance(&1), 98); @@ -245,18 +242,18 @@ fn transfer_owner_should_work() { assert_eq!(Balances::reserved_balance(&1), 0); assert_eq!(Balances::reserved_balance(&2), 2); - assert_ok!(Uniques::set_accept_ownership(Origin::signed(1), Some(0))); + assert_ok!(Nfts::set_accept_ownership(Origin::signed(1), Some(0))); assert_noop!( - Uniques::transfer_ownership(Origin::signed(1), 0, 1), + Nfts::transfer_ownership(Origin::signed(1), 0, 1), Error::::NoPermission ); // Mint and set metadata now and make sure that deposit gets transferred back. 
- assert_ok!(Uniques::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); - assert_ok!(Uniques::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false)); - assert_ok!(Uniques::set_accept_ownership(Origin::signed(3), Some(0))); - assert_ok!(Uniques::transfer_ownership(Origin::signed(2), 0, 3)); + assert_ok!(Nfts::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false)); + assert_ok!(Nfts::set_accept_ownership(Origin::signed(3), Some(0))); + assert_ok!(Nfts::transfer_ownership(Origin::signed(2), 0, 3)); assert_eq!(collections(), vec![(3, 0)]); assert_eq!(Balances::total_balance(&2), 57); assert_eq!(Balances::total_balance(&3), 145); @@ -265,24 +262,21 @@ fn transfer_owner_should_work() { // 2's acceptence from before is reset when it became owner, so it cannot be transfered // without a fresh acceptance. 
- assert_noop!( - Uniques::transfer_ownership(Origin::signed(3), 0, 2), - Error::::Unaccepted - ); + assert_noop!(Nfts::transfer_ownership(Origin::signed(3), 0, 2), Error::::Unaccepted); }); } #[test] fn set_team_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); - - assert_ok!(Uniques::mint(Origin::signed(2), 0, 42, 2)); - assert_ok!(Uniques::freeze(Origin::signed(4), 0, 42)); - assert_ok!(Uniques::thaw(Origin::signed(3), 0, 42)); - assert_ok!(Uniques::transfer(Origin::signed(3), 0, 42, 3)); - assert_ok!(Uniques::burn(Origin::signed(3), 0, 42, None)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::set_team(Origin::signed(1), 0, 2, 3, 4)); + + assert_ok!(Nfts::mint(Origin::signed(2), 0, 42, 2)); + assert_ok!(Nfts::freeze(Origin::signed(4), 0, 42)); + assert_ok!(Nfts::thaw(Origin::signed(3), 0, 42)); + assert_ok!(Nfts::transfer(Origin::signed(3), 0, 42, 3)); + assert_ok!(Nfts::burn(Origin::signed(3), 0, 42, None)); }); } @@ -291,59 +285,56 @@ fn set_collection_metadata_should_work() { new_test_ext().execute_with(|| { // Cannot add metadata to unknown item assert_noop!( - Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false), + Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false), Error::::UnknownCollection, ); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, false)); // Cannot add metadata to unowned item assert_noop!( - Uniques::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false), + Nfts::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false), Error::::NoPermission, ); // Successfully add metadata and take deposit Balances::make_free_balance_be(&1, 30); - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false)); + 
assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false)); assert_eq!(Balances::free_balance(&1), 9); assert!(CollectionMetadataOf::::contains_key(0)); // Force origin works, too. - assert_ok!(Uniques::set_collection_metadata(Origin::root(), 0, bvec![0u8; 18], false)); + assert_ok!(Nfts::set_collection_metadata(Origin::root(), 0, bvec![0u8; 18], false)); // Update deposit - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], false)); + assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], false)); assert_eq!(Balances::free_balance(&1), 14); - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 25], false)); + assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 25], false)); assert_eq!(Balances::free_balance(&1), 4); // Cannot over-reserve assert_noop!( - Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 40], false), + Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 40], false), BalancesError::::InsufficientBalance, ); // Can't set or clear metadata once frozen - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], true)); + assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], true)); assert_noop!( - Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], false), + Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], false), Error::::Frozen, ); - assert_noop!( - Uniques::clear_collection_metadata(Origin::signed(1), 0), - Error::::Frozen - ); + assert_noop!(Nfts::clear_collection_metadata(Origin::signed(1), 0), Error::::Frozen); // Clear Metadata - assert_ok!(Uniques::set_collection_metadata(Origin::root(), 0, bvec![0u8; 15], false)); + assert_ok!(Nfts::set_collection_metadata(Origin::root(), 0, bvec![0u8; 15], false)); assert_noop!( - Uniques::clear_collection_metadata(Origin::signed(2), 0), + 
Nfts::clear_collection_metadata(Origin::signed(2), 0), Error::::NoPermission ); assert_noop!( - Uniques::clear_collection_metadata(Origin::signed(1), 1), + Nfts::clear_collection_metadata(Origin::signed(1), 1), Error::::UnknownCollection ); - assert_ok!(Uniques::clear_collection_metadata(Origin::signed(1), 0)); + assert_ok!(Nfts::clear_collection_metadata(Origin::signed(1), 0)); assert!(!CollectionMetadataOf::::contains_key(0)); }); } @@ -354,53 +345,50 @@ fn set_item_metadata_should_work() { Balances::make_free_balance_be(&1, 30); // Cannot add metadata to unknown item - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); // Cannot add metadata to unowned item assert_noop!( - Uniques::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false), + Nfts::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false), Error::::NoPermission, ); // Successfully add metadata and take deposit - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 20], false)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 20], false)); assert_eq!(Balances::free_balance(&1), 8); assert!(ItemMetadataOf::::contains_key(0, 42)); // Force origin works, too. 
- assert_ok!(Uniques::set_metadata(Origin::root(), 0, 42, bvec![0u8; 18], false)); + assert_ok!(Nfts::set_metadata(Origin::root(), 0, 42, bvec![0u8; 18], false)); // Update deposit - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false)); assert_eq!(Balances::free_balance(&1), 13); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 25], false)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 25], false)); assert_eq!(Balances::free_balance(&1), 3); // Cannot over-reserve assert_noop!( - Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 40], false), + Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 40], false), BalancesError::::InsufficientBalance, ); // Can't set or clear metadata once frozen - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], true)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], true)); assert_noop!( - Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false), + Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false), Error::::Frozen, ); - assert_noop!(Uniques::clear_metadata(Origin::signed(1), 0, 42), Error::::Frozen); + assert_noop!(Nfts::clear_metadata(Origin::signed(1), 0, 42), Error::::Frozen); // Clear Metadata - assert_ok!(Uniques::set_metadata(Origin::root(), 0, 42, bvec![0u8; 15], false)); - assert_noop!( - Uniques::clear_metadata(Origin::signed(2), 0, 42), - Error::::NoPermission - ); + assert_ok!(Nfts::set_metadata(Origin::root(), 0, 42, bvec![0u8; 15], false)); + assert_noop!(Nfts::clear_metadata(Origin::signed(2), 0, 42), Error::::NoPermission); assert_noop!( - Uniques::clear_metadata(Origin::signed(1), 1, 42), + Nfts::clear_metadata(Origin::signed(1), 1, 42), Error::::UnknownCollection ); - assert_ok!(Uniques::clear_metadata(Origin::signed(1), 0, 42)); + 
assert_ok!(Nfts::clear_metadata(Origin::signed(1), 0, 42)); assert!(!ItemMetadataOf::::contains_key(0, 42)); }); } @@ -410,11 +398,11 @@ fn set_attribute_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, false)); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![1], bvec![0])); + assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); + assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); + assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(0), bvec![1], bvec![0])); assert_eq!( attributes(0), vec![ @@ -425,7 +413,7 @@ fn set_attribute_should_work() { ); assert_eq!(Balances::reserved_balance(1), 9); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0; 10])); + assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0; 10])); assert_eq!( attributes(0), vec![ @@ -436,7 +424,7 @@ fn set_attribute_should_work() { ); assert_eq!(Balances::reserved_balance(1), 18); - assert_ok!(Uniques::clear_attribute(Origin::signed(1), 0, Some(0), bvec![1])); + assert_ok!(Nfts::clear_attribute(Origin::signed(1), 0, Some(0), bvec![1])); assert_eq!( attributes(0), vec![(None, bvec![0], bvec![0; 10]), (Some(0), bvec![0], bvec![0]),] @@ -444,7 +432,7 @@ fn set_attribute_should_work() { assert_eq!(Balances::reserved_balance(1), 15); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_ok!(Uniques::destroy(Origin::signed(1), 0, w)); + assert_ok!(Nfts::destroy(Origin::signed(1), 0, w)); assert_eq!(attributes(0), vec![]); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -455,11 +443,11 @@ fn 
set_attribute_should_respect_freeze() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, false)); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![0])); + assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); + assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); + assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![0])); assert_eq!( attributes(0), vec![ @@ -470,15 +458,15 @@ fn set_attribute_should_respect_freeze() { ); assert_eq!(Balances::reserved_balance(1), 9); - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![], true)); + assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![], true)); let e = Error::::Frozen; - assert_noop!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0]), e); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1])); + assert_noop!(Nfts::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0]), e); + assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1])); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 0, bvec![], true)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 0, bvec![], true)); let e = Error::::Frozen; - assert_noop!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1]), e); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![1])); + assert_noop!(Nfts::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1]), e); + assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![1])); }); } @@ 
-487,32 +475,32 @@ fn force_item_status_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 69, 2)); - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 69, 2)); + assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 65); // force item status to be free holding - assert_ok!(Uniques::force_item_status(Origin::root(), 0, 1, 1, 1, 1, true, false)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 142, 1)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 169, 2)); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 142, bvec![0; 20], false)); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 169, bvec![0; 20], false)); + assert_ok!(Nfts::force_item_status(Origin::root(), 0, 1, 1, 1, 1, true, false)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 142, 1)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 169, 2)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 142, bvec![0; 20], false)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 169, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 65); - assert_ok!(Uniques::redeposit(Origin::signed(1), 0, bvec![0, 42, 50, 69, 100])); + assert_ok!(Nfts::redeposit(Origin::signed(1), 0, 
bvec![0, 42, 50, 69, 100])); assert_eq!(Balances::reserved_balance(1), 63); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 42); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); + assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 21); - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); + assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 0); }); } @@ -521,23 +509,23 @@ fn force_item_status_should_work() { fn burn_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); - assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Nfts::set_team(Origin::signed(1), 0, 2, 3, 4)); assert_noop!( - Uniques::burn(Origin::signed(5), 0, 42, Some(5)), + Nfts::burn(Origin::signed(5), 0, 42, Some(5)), Error::::UnknownCollection ); - assert_ok!(Uniques::mint(Origin::signed(2), 0, 42, 5)); - assert_ok!(Uniques::mint(Origin::signed(2), 0, 69, 5)); + assert_ok!(Nfts::mint(Origin::signed(2), 0, 42, 5)); + assert_ok!(Nfts::mint(Origin::signed(2), 0, 69, 5)); assert_eq!(Balances::reserved_balance(1), 2); - assert_noop!(Uniques::burn(Origin::signed(0), 0, 42, None), Error::::NoPermission); - assert_noop!(Uniques::burn(Origin::signed(5), 0, 42, Some(6)), Error::::WrongOwner); + assert_noop!(Nfts::burn(Origin::signed(0), 0, 42, None), Error::::NoPermission); + assert_noop!(Nfts::burn(Origin::signed(5), 0, 42, Some(6)), Error::::WrongOwner); - assert_ok!(Uniques::burn(Origin::signed(5), 0, 42, Some(5))); - 
assert_ok!(Uniques::burn(Origin::signed(3), 0, 69, Some(5))); + assert_ok!(Nfts::burn(Origin::signed(5), 0, 42, Some(5))); + assert_ok!(Nfts::burn(Origin::signed(3), 0, 69, Some(5))); assert_eq!(Balances::reserved_balance(1), 0); }); } @@ -545,45 +533,45 @@ fn burn_works() { #[test] fn approval_lifecycle_works() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); - assert_ok!(Uniques::transfer(Origin::signed(3), 0, 42, 4)); - assert_noop!(Uniques::transfer(Origin::signed(3), 0, 42, 3), Error::::NoPermission); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Nfts::transfer(Origin::signed(3), 0, 42, 4)); + assert_noop!(Nfts::transfer(Origin::signed(3), 0, 42, 3), Error::::NoPermission); assert!(Item::::get(0, 42).unwrap().approved.is_none()); - assert_ok!(Uniques::approve_transfer(Origin::signed(4), 0, 42, 2)); - assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 2)); + assert_ok!(Nfts::approve_transfer(Origin::signed(4), 0, 42, 2)); + assert_ok!(Nfts::transfer(Origin::signed(2), 0, 42, 2)); }); } #[test] fn cancel_approval_works() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3)); assert_noop!( - Uniques::cancel_approval(Origin::signed(2), 1, 42, None), + Nfts::cancel_approval(Origin::signed(2), 1, 42, None), Error::::UnknownCollection ); assert_noop!( - 
Uniques::cancel_approval(Origin::signed(2), 0, 43, None), + Nfts::cancel_approval(Origin::signed(2), 0, 43, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(Origin::signed(3), 0, 42, None), + Nfts::cancel_approval(Origin::signed(3), 0, 42, None), Error::::NoPermission ); assert_noop!( - Uniques::cancel_approval(Origin::signed(2), 0, 42, Some(4)), + Nfts::cancel_approval(Origin::signed(2), 0, 42, Some(4)), Error::::WrongDelegate ); - assert_ok!(Uniques::cancel_approval(Origin::signed(2), 0, 42, Some(3))); + assert_ok!(Nfts::cancel_approval(Origin::signed(2), 0, 42, Some(3))); assert_noop!( - Uniques::cancel_approval(Origin::signed(2), 0, 42, None), + Nfts::cancel_approval(Origin::signed(2), 0, 42, None), Error::::NoDelegate ); }); @@ -592,26 +580,26 @@ fn cancel_approval_works() { #[test] fn cancel_approval_works_with_admin() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3)); assert_noop!( - Uniques::cancel_approval(Origin::signed(1), 1, 42, None), + Nfts::cancel_approval(Origin::signed(1), 1, 42, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(Origin::signed(1), 0, 43, None), + Nfts::cancel_approval(Origin::signed(1), 0, 43, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(4)), + Nfts::cancel_approval(Origin::signed(1), 0, 42, Some(4)), Error::::WrongDelegate ); - assert_ok!(Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(3))); + assert_ok!(Nfts::cancel_approval(Origin::signed(1), 0, 42, Some(3))); assert_noop!( - Uniques::cancel_approval(Origin::signed(1), 0, 42, None), + 
Nfts::cancel_approval(Origin::signed(1), 0, 42, None), Error::::NoDelegate ); }); @@ -620,28 +608,25 @@ fn cancel_approval_works_with_admin() { #[test] fn cancel_approval_works_with_force() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3)); assert_noop!( - Uniques::cancel_approval(Origin::root(), 1, 42, None), + Nfts::cancel_approval(Origin::root(), 1, 42, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(Origin::root(), 0, 43, None), + Nfts::cancel_approval(Origin::root(), 0, 43, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(Origin::root(), 0, 42, Some(4)), + Nfts::cancel_approval(Origin::root(), 0, 42, Some(4)), Error::::WrongDelegate ); - assert_ok!(Uniques::cancel_approval(Origin::root(), 0, 42, Some(3))); - assert_noop!( - Uniques::cancel_approval(Origin::root(), 0, 42, None), - Error::::NoDelegate - ); + assert_ok!(Nfts::cancel_approval(Origin::root(), 0, 42, Some(3))); + assert_noop!(Nfts::cancel_approval(Origin::root(), 0, 42, None), Error::::NoDelegate); }); } @@ -653,10 +638,10 @@ fn max_supply_should_work() { let max_supply = 2; // validate set_collection_max_supply - assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_id, true)); + assert_ok!(Nfts::force_create(Origin::root(), collection_id, user_id, true)); assert!(!CollectionMaxSupply::::contains_key(collection_id)); - assert_ok!(Uniques::set_collection_max_supply( + assert_ok!(Nfts::set_collection_max_supply( Origin::signed(user_id), collection_id, max_supply @@ -669,24 +654,20 @@ fn max_supply_should_work() { })); assert_noop!( - Uniques::set_collection_max_supply( - 
Origin::signed(user_id), - collection_id, - max_supply + 1 - ), + Nfts::set_collection_max_supply(Origin::signed(user_id), collection_id, max_supply + 1), Error::::MaxSupplyAlreadySet ); // validate we can't mint more to max supply - assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, 0, user_id)); - assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, 1, user_id)); + assert_ok!(Nfts::mint(Origin::signed(user_id), collection_id, 0, user_id)); + assert_ok!(Nfts::mint(Origin::signed(user_id), collection_id, 1, user_id)); assert_noop!( - Uniques::mint(Origin::signed(user_id), collection_id, 2, user_id), + Nfts::mint(Origin::signed(user_id), collection_id, 2, user_id), Error::::MaxSupplyReached ); // validate we remove the CollectionMaxSupply record when we destroy the collection - assert_ok!(Uniques::destroy( + assert_ok!(Nfts::destroy( Origin::signed(user_id), collection_id, Collection::::get(collection_id).unwrap().destroy_witness() @@ -703,20 +684,14 @@ fn set_price_should_work() { let item_1 = 1; let item_2 = 2; - assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_id, true)); + assert_ok!(Nfts::force_create(Origin::root(), collection_id, user_id, true)); - assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_1, user_id)); - assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_2, user_id)); + assert_ok!(Nfts::mint(Origin::signed(user_id), collection_id, item_1, user_id)); + assert_ok!(Nfts::mint(Origin::signed(user_id), collection_id, item_2, user_id)); - assert_ok!(Uniques::set_price( - Origin::signed(user_id), - collection_id, - item_1, - Some(1), - None, - )); + assert_ok!(Nfts::set_price(Origin::signed(user_id), collection_id, item_1, Some(1), None,)); - assert_ok!(Uniques::set_price( + assert_ok!(Nfts::set_price( Origin::signed(user_id), collection_id, item_2, @@ -740,7 +715,7 @@ fn set_price_should_work() { })); // validate we can unset the price - 
assert_ok!(Uniques::set_price(Origin::signed(user_id), collection_id, item_2, None, None)); + assert_ok!(Nfts::set_price(Origin::signed(user_id), collection_id, item_2, None, None)); assert!(events().contains(&Event::::ItemPriceRemoved { collection: collection_id, item: item_2 @@ -767,13 +742,13 @@ fn buy_item_should_work() { Balances::make_free_balance_be(&user_2, initial_balance); Balances::make_free_balance_be(&user_3, initial_balance); - assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_1, true)); + assert_ok!(Nfts::force_create(Origin::root(), collection_id, user_1, true)); - assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_1, user_1)); - assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_2, user_1)); - assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_3, user_1)); + assert_ok!(Nfts::mint(Origin::signed(user_1), collection_id, item_1, user_1)); + assert_ok!(Nfts::mint(Origin::signed(user_1), collection_id, item_2, user_1)); + assert_ok!(Nfts::mint(Origin::signed(user_1), collection_id, item_3, user_1)); - assert_ok!(Uniques::set_price( + assert_ok!(Nfts::set_price( Origin::signed(user_1), collection_id, item_1, @@ -781,7 +756,7 @@ fn buy_item_should_work() { None, )); - assert_ok!(Uniques::set_price( + assert_ok!(Nfts::set_price( Origin::signed(user_1), collection_id, item_2, @@ -791,12 +766,12 @@ fn buy_item_should_work() { // can't buy for less assert_noop!( - Uniques::buy_item(Origin::signed(user_2), collection_id, item_1, 1), + Nfts::buy_item(Origin::signed(user_2), collection_id, item_1, 1), Error::::BidTooLow ); // pass the higher price to validate it will still deduct correctly - assert_ok!(Uniques::buy_item(Origin::signed(user_2), collection_id, item_1, price_1 + 1,)); + assert_ok!(Nfts::buy_item(Origin::signed(user_2), collection_id, item_1, price_1 + 1,)); // validate the new owner & balances let item = Item::::get(collection_id, item_1).unwrap(); @@ -806,18 +781,18 @@ fn 
buy_item_should_work() { // can't buy from yourself assert_noop!( - Uniques::buy_item(Origin::signed(user_1), collection_id, item_2, price_2), + Nfts::buy_item(Origin::signed(user_1), collection_id, item_2, price_2), Error::::NoPermission ); // can't buy when the item is listed for a specific buyer assert_noop!( - Uniques::buy_item(Origin::signed(user_2), collection_id, item_2, price_2), + Nfts::buy_item(Origin::signed(user_2), collection_id, item_2, price_2), Error::::NoPermission ); // can buy when I'm a whitelisted buyer - assert_ok!(Uniques::buy_item(Origin::signed(user_3), collection_id, item_2, price_2,)); + assert_ok!(Nfts::buy_item(Origin::signed(user_3), collection_id, item_2, price_2,)); assert!(events().contains(&Event::::ItemBought { collection: collection_id, @@ -832,13 +807,13 @@ fn buy_item_should_work() { // can't buy when item is not for sale assert_noop!( - Uniques::buy_item(Origin::signed(user_2), collection_id, item_3, price_2), + Nfts::buy_item(Origin::signed(user_2), collection_id, item_3, price_2), Error::::NotForSale ); // ensure we can't buy an item when the collection or an item is frozen { - assert_ok!(Uniques::set_price( + assert_ok!(Nfts::set_price( Origin::signed(user_1), collection_id, item_3, @@ -847,21 +822,21 @@ fn buy_item_should_work() { )); // freeze collection - assert_ok!(Uniques::freeze_collection(Origin::signed(user_1), collection_id)); + assert_ok!(Nfts::freeze_collection(Origin::signed(user_1), collection_id)); - let buy_item_call = mock::Call::Uniques(crate::Call::::buy_item { + let buy_item_call = mock::Call::Nfts(crate::Call::::buy_item { collection: collection_id, item: item_3, bid_price: price_1, }); assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::::Frozen); - assert_ok!(Uniques::thaw_collection(Origin::signed(user_1), collection_id)); + assert_ok!(Nfts::thaw_collection(Origin::signed(user_1), collection_id)); // freeze item - assert_ok!(Uniques::freeze(Origin::signed(user_1), collection_id, 
item_3)); + assert_ok!(Nfts::freeze(Origin::signed(user_1), collection_id, item_3)); - let buy_item_call = mock::Call::Uniques(crate::Call::::buy_item { + let buy_item_call = mock::Call::Nfts(crate::Call::::buy_item { collection: collection_id, item: item_3, bid_price: price_1, diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index 98e056163d28d..1081ec8110288 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Various basic types for use in the Uniques pallet. +//! Various basic types for use in the Nfts pallet. use super::*; use frame_support::{ diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index 7c8cb170b1b1d..7537cd3b03e6b 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_uniques +//! Autogenerated weights for pallet_nfts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2022-07-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -32,9 +32,9 @@ // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --pallet=pallet_uniques +// --pallet=pallet_nfts // --chain=dev -// --output=./frame/uniques/src/weights.rs +// --output=./frame/nfts/src/weights.rs // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -44,7 +44,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; -/// Weight functions needed for pallet_uniques. +/// Weight functions needed for pallet_nfts. 
pub trait WeightInfo { fn create() -> Weight; fn force_create() -> Weight; @@ -74,7 +74,7 @@ pub trait WeightInfo { fn buy_item() -> Weight; } -/// Weights for pallet_uniques using the Substrate node and recommended hardware. +/// Weights for pallet_nfts using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:1) From 69edfb29d7d34aea045e7a2379560b95ff6f4157 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Fri, 9 Sep 2022 18:03:07 +0300 Subject: [PATCH 003/101] Update weights --- frame/nfts/src/weights.rs | 352 +++++++++++++++++++------------------- 1 file changed, 176 insertions(+), 176 deletions(-) diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index 7537cd3b03e6b..de518b8286665 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -80,16 +80,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - (33_075_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(33_075_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - (19_528_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(19_528_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -103,192 +103,192 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. 
fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as u64) // Standard Error: 25_000 - .saturating_add((13_639_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(13_639_000 as u64).saturating_mul(n as u64)) // Standard Error: 25_000 - .saturating_add((2_393_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(2_393_000 as u64).saturating_mul(m as u64)) // Standard Error: 25_000 - .saturating_add((2_217_000 as Weight).saturating_mul(a as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(m as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + .saturating_add(Weight::from_ref_time(2_217_000 as u64).saturating_mul(a as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(m as u64))) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(a as u64))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - (42_146_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(42_146_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + 
.saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - (42_960_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(42_960_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - (33_025_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(33_025_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:100 w:100) /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as u64) // Standard Error: 24_000 - .saturating_add((15_540_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(15_540_000 as u64).saturating_mul(i as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - (25_194_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_194_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - (25_397_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_397_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - (19_278_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_278_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class 
(r:1 w:1) fn thaw_collection() -> Weight { - (19_304_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_304_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - (28_615_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(28_615_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - (19_943_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_943_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - (22_583_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(22_583_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - (47_520_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(47_520_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 
as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - (45_316_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(45_316_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - (38_391_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(38_391_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - (38_023_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(38_023_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - (37_398_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(37_398_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - (35_621_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(35_621_000 
as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - (25_856_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_856_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - (26_098_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_098_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - (24_076_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(24_076_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - (22_035_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_035_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - (22_534_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + 
Weight::from_ref_time(22_534_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques ItemPriceOf (r:1 w:1) // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - (45_272_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(45_272_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } } @@ -297,16 +297,16 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - (33_075_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(33_075_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - (19_528_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(19_528_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -320,191 +320,191 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. 
fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as u64) // Standard Error: 25_000 - .saturating_add((13_639_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(13_639_000 as u64).saturating_mul(n as u64)) // Standard Error: 25_000 - .saturating_add((2_393_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(2_393_000 as u64).saturating_mul(m as u64)) // Standard Error: 25_000 - .saturating_add((2_217_000 as Weight).saturating_mul(a as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(m as Weight))) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + .saturating_add(Weight::from_ref_time(2_217_000 as u64).saturating_mul(a as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(m as u64))) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(a as u64))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - (42_146_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(42_146_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + 
.saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - (42_960_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(42_960_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - (33_025_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(33_025_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:100 w:100) /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as u64) // Standard Error: 24_000 - .saturating_add((15_540_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(15_540_000 as u64).saturating_mul(i as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - (25_194_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_194_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - (25_397_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_397_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - (19_278_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_278_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + 
.saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - (19_304_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_304_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - (28_615_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(28_615_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - (19_943_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_943_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - (22_583_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(22_583_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - (47_520_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + 
Weight::from_ref_time(47_520_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - (45_316_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(45_316_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - (38_391_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(38_391_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - (38_023_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(38_023_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - (37_398_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(37_398_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight 
{ - (35_621_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(35_621_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - (25_856_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_856_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - (26_098_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_098_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - (24_076_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(24_076_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - (22_035_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_035_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques 
ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - (22_534_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_534_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques ItemPriceOf (r:1 w:1) // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - (45_272_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(45_272_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } } From 028ea383058fae970cc6b6e2fd51d597f236ac25 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Tue, 13 Sep 2022 15:34:28 +0200 Subject: [PATCH 004/101] Nfts: Multiple approvals (#12178) * multiple approvals * clear * tests & clean up * fix in logic & fmt * fix benchmarks * deadline * test deadline * current_block + deadline * update ApprovedTransfer event * benchmark * docs * Update frame/nfts/src/lib.rs Co-authored-by: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> * fmt fix * Update frame/nfts/src/lib.rs Co-authored-by: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> * update tests * anyone can cancel * Update frame/nfts/src/tests.rs Co-authored-by: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> * fmt * fix logic * unnecessary line * ".git/.scripts/bench-bot.sh" pallet dev pallet_nfts * Update frame/nfts/src/lib.rs * Update lib.rs * fmt * Update frame/nfts/src/lib.rs Co-authored-by: Squirrel * Update frame/nfts/src/lib.rs Co-authored-by: Squirrel * fmt * Update frame/nfts/src/lib.rs Co-authored-by: Squirrel * suggestion * new line * 
".git/.scripts/bench-bot.sh" pallet dev pallet_nfts Co-authored-by: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Co-authored-by: command-bot <> Co-authored-by: Squirrel --- bin/node/runtime/src/lib.rs | 2 + frame/nfts/src/benchmarking.rs | 23 +- frame/nfts/src/functions.rs | 13 +- frame/nfts/src/lib.rs | 126 ++++++++-- frame/nfts/src/mock.rs | 1 + frame/nfts/src/tests.rs | 165 ++++++++++--- frame/nfts/src/types.rs | 6 +- frame/nfts/src/weights.rs | 409 +++++++++++++++++---------------- 8 files changed, 491 insertions(+), 254 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 7b83ef98d7d50..c34c46b04c0dc 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1453,6 +1453,7 @@ parameter_types! { pub const ItemDeposit: Balance = 1 * DOLLARS; pub const KeyLimit: u32 = 32; pub const ValueLimit: u32 = 256; + pub const ApprovalsLimit: u32 = 20; } impl pallet_uniques::Config for Runtime { @@ -1490,6 +1491,7 @@ impl pallet_nfts::Config for Runtime { type StringLimit = StringLimit; type KeyLimit = KeyLimit; type ValueLimit = ValueLimit; + type ApprovalsLimit = ApprovalsLimit; type WeightInfo = pallet_nfts::weights::SubstrateWeight; #[cfg(feature = "runtime-benchmarks")] type Helper = (); diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index e30b5ebbd3fe5..ca38851222e8d 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -368,9 +368,10 @@ benchmarks_instance_pallet! { let (item, ..) 
= mint_item::(0); let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); - }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup) + let deadline = T::BlockNumber::max_value(); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup, Some(deadline)) verify { - assert_last_event::(Event::ApprovedTransfer { collection, item, owner: caller, delegate }.into()); + assert_last_event::(Event::ApprovedTransfer { collection, item, owner: caller, delegate, deadline: Some(deadline) }.into()); } cancel_approval { @@ -379,12 +380,26 @@ benchmarks_instance_pallet! { let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let origin = SystemOrigin::Signed(caller.clone()).into(); - Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone())?; - }: _(SystemOrigin::Signed(caller.clone()), collection, item, Some(delegate_lookup)) + let deadline = T::BlockNumber::max_value(); + Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; + }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup) verify { assert_last_event::(Event::ApprovalCancelled { collection, item, owner: caller, delegate }.into()); } + clear_all_transfer_approvals { + let (collection, caller, _) = create_collection::(); + let (item, ..) 
= mint_item::(0); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let origin = SystemOrigin::Signed(caller.clone()).into(); + let deadline = T::BlockNumber::max_value(); + Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; + }: _(SystemOrigin::Signed(caller.clone()), collection, item) + verify { + assert_last_event::(Event::AllApprovalsCancelled {collection, item, owner: caller}.into()); + } + set_accept_ownership { let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); diff --git a/frame/nfts/src/functions.rs b/frame/nfts/src/functions.rs index 107214558307f..27ab752dbabf6 100644 --- a/frame/nfts/src/functions.rs +++ b/frame/nfts/src/functions.rs @@ -48,6 +48,12 @@ impl, I: 'static> Pallet { Account::::insert((&dest, &collection, &item), ()); let origin = details.owner; details.owner = dest; + + // The approved accounts have to be reset to None, because otherwise pre-approve attack + // would be possible, where the owner can approve his second account before making the + // transaction and then claiming the item back. 
+ details.approvals.clear(); + Item::::insert(&collection, &item, &details); ItemPriceOf::::remove(&collection, &item); @@ -168,7 +174,12 @@ impl, I: 'static> Pallet { let owner = owner.clone(); Account::::insert((&owner, &collection, &item), ()); - let details = ItemDetails { owner, approved: None, is_frozen: false, deposit }; + let details = ItemDetails { + owner, + approvals: ApprovalsOf::::default(), + is_frozen: false, + deposit, + }; Item::::insert(&collection, &item, details); Ok(()) }, diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index cb96e8138ba5e..14691c21a0ef2 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -46,7 +46,7 @@ use frame_support::{ traits::{ tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency, }, - transactional, + transactional, BoundedBTreeMap, }; use frame_system::Config as SystemConfig; use sp_runtime::{ @@ -59,7 +59,7 @@ pub use pallet::*; pub use types::*; pub use weights::WeightInfo; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; #[frame_support::pallet] pub mod pallet { @@ -149,6 +149,10 @@ pub mod pallet { #[pallet::constant] type ValueLimit: Get; + /// The maximum approvals an item could have. + #[pallet::constant] + type ApprovalsLimit: Get; + #[cfg(feature = "runtime-benchmarks")] /// A set of helper functions for benchmarking. type Helper: BenchmarkHelper; @@ -157,6 +161,12 @@ pub mod pallet { type WeightInfo: WeightInfo; } + pub type ApprovalsOf = BoundedBTreeMap< + ::AccountId, + Option<::BlockNumber>, + >::ApprovalsLimit, + >; + #[pallet::storage] #[pallet::storage_prefix = "Class"] /// Details of a collection. 
@@ -209,7 +219,7 @@ pub mod pallet { T::CollectionId, Blake2_128Concat, T::ItemId, - ItemDetails>, + ItemDetails, ApprovalsOf>, OptionQuery, >; @@ -311,6 +321,7 @@ pub mod pallet { item: T::ItemId, owner: T::AccountId, delegate: T::AccountId, + deadline: Option<::BlockNumber>, }, /// An approval for a `delegate` account to transfer the `item` of an item /// `collection` was cancelled by its `owner`. @@ -320,6 +331,8 @@ pub mod pallet { owner: T::AccountId, delegate: T::AccountId, }, + /// All approvals of an item got cancelled. + AllApprovalsCancelled { collection: T::CollectionId, item: T::ItemId, owner: T::AccountId }, /// A `collection` has had its attributes changed by the `Force` origin. ItemStatusChanged { collection: T::CollectionId }, /// New metadata has been set for a `collection`. @@ -385,6 +398,8 @@ pub mod pallet { UnknownCollection, /// The item ID has already been used for an item. AlreadyExists, + /// The approval had a deadline that expired, so the approval isn't valid anymore. + ApprovalExpired, /// The owner turned out to be different to what was expected. WrongOwner, /// Invalid witness data given. @@ -393,10 +408,10 @@ pub mod pallet { InUse, /// The item or collection is frozen. Frozen, + /// The provided account is not a delegate. + NotDelegate, /// The delegate turned out to be different to what was expected. WrongDelegate, - /// There is no delegate approved. - NoDelegate, /// No approval exists that would allow the transfer. Unapproved, /// The named owner has not signed ownership of the collection is acceptable. @@ -415,6 +430,8 @@ pub mod pallet { NotForSale, /// The provided bid is too low. BidTooLow, + /// The item has reached its approval limit. 
+ ReachedApprovalLimit, } impl, I: 'static> Pallet { @@ -630,8 +647,12 @@ pub mod pallet { Self::do_transfer(collection, item, dest, |collection_details, details| { if details.owner != origin && collection_details.admin != origin { - let approved = details.approved.take().map_or(false, |i| i == origin); - ensure!(approved, Error::::NoPermission); + let deadline = + details.approvals.get(&origin).ok_or(Error::::NoPermission)?; + if let Some(d) = deadline { + let block_number = frame_system::Pallet::::block_number(); + ensure!(block_number <= *d, Error::::ApprovalExpired); + } } Ok(()) }) @@ -912,6 +933,8 @@ pub mod pallet { /// - `collection`: The collection of the item to be approved for delegated transfer. /// - `item`: The item of the item to be approved for delegated transfer. /// - `delegate`: The account to delegate permission to transfer the item. + /// - `maybe_deadline`: Optional deadline for the approval. Specified by providing the + /// number of blocks after which the approval will expire /// /// Emits `ApprovedTransfer` on success. /// @@ -922,6 +945,7 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, delegate: AccountIdLookupOf, + maybe_deadline: Option<::BlockNumber>, ) -> DispatchResult { let maybe_check: Option = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -939,21 +963,27 @@ pub mod pallet { ensure!(permitted, Error::::NoPermission); } - details.approved = Some(delegate); + let now = frame_system::Pallet::::block_number(); + let deadline = maybe_deadline.map(|d| d.saturating_add(now)); + + details + .approvals + .try_insert(delegate.clone(), deadline) + .map_err(|_| Error::::ReachedApprovalLimit)?; Item::::insert(&collection, &item, &details); - let delegate = details.approved.expect("set as Some above; qed"); Self::deposit_event(Event::ApprovedTransfer { collection, item, owner: details.owner, delegate, + deadline, }); Ok(()) } - /// Cancel the prior approval for the transfer of an item by a delegate. 
+ /// Cancel one of the transfer approvals for a specific item. /// /// Origin must be either: /// - the `Force` origin; @@ -962,9 +992,8 @@ /// /// Arguments: /// - `collection`: The collection of the item of whose approval will be cancelled. - /// - `item`: The item of the item of whose approval will be cancelled. - /// - `maybe_check_delegate`: If `Some` will ensure that the given account is the one to - /// which permission of transfer is delegated. + /// - `item`: The item of the collection of whose approval will be cancelled. + /// - `delegate`: The account that is going to lose their approval. /// /// Emits `ApprovalCancelled` on success. /// @@ -974,7 +1003,67 @@ origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - maybe_check_delegate: Option>, + delegate: AccountIdLookupOf, ) -> DispatchResult { + let maybe_check: Option = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + + let delegate = T::Lookup::lookup(delegate)?; + + let collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; + + let maybe_deadline = + details.approvals.get(&delegate).ok_or(Error::::NotDelegate)?; + + let is_past_deadline = if let Some(deadline) = maybe_deadline { + let now = frame_system::Pallet::::block_number(); + now > *deadline + } else { + false + }; + + if !is_past_deadline { + if let Some(check) = maybe_check { + let permitted = check == collection_details.admin || check == details.owner; + ensure!(permitted, Error::::NoPermission); + } + } + + details.approvals.remove(&delegate); + Item::::insert(&collection, &item, &details); + Self::deposit_event(Event::ApprovalCancelled { + collection, + item, + owner: details.owner, + delegate, + }); + + Ok(()) + } + + /// Cancel all the approvals of a specific item. 
+ /// + /// Origin must be either: + /// - the `Force` origin; + /// - `Signed` with the signer being the Admin of the `collection`; + /// - `Signed` with the signer being the Owner of the `item`; + /// + /// Arguments: + /// - `collection`: The collection of the item of whose approvals will be cleared. + /// - `item`: The item of the collection of whose approvals will be cleared. + /// + /// Emits `AllApprovalsCancelled` on success. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::clear_all_transfer_approvals())] + pub fn clear_all_transfer_approvals( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, ) -> DispatchResult { let maybe_check: Option = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -988,18 +1077,13 @@ pub mod pallet { let permitted = check == collection_details.admin || check == details.owner; ensure!(permitted, Error::::NoPermission); } - let maybe_check_delegate = maybe_check_delegate.map(T::Lookup::lookup).transpose()?; - let old = details.approved.take().ok_or(Error::::NoDelegate)?; - if let Some(check_delegate) = maybe_check_delegate { - ensure!(check_delegate == old, Error::::WrongDelegate); - } + details.approvals.clear(); Item::::insert(&collection, &item, &details); - Self::deposit_event(Event::ApprovalCancelled { + Self::deposit_event(Event::AllApprovalsCancelled { collection, item, owner: details.owner, - delegate: old, }); Ok(()) diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs index f3040faac5f40..ad7a94b3eed50 100644 --- a/frame/nfts/src/mock.rs +++ b/frame/nfts/src/mock.rs @@ -100,6 +100,7 @@ impl Config for Test { type StringLimit = ConstU32<50>; type KeyLimit = ConstU32<50>; type ValueLimit = ConstU32<50>; + type ApprovalsLimit = ConstU32<10>; type WeightInfo = (); #[cfg(feature = "runtime-benchmarks")] type Helper = (); diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 2b20d124bd9ae..9fb29d0e95c26 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ 
-70,6 +70,12 @@ fn attributes(collection: u32) -> Vec<(Option, Vec, Vec)> { s } +fn approvals(collection_id: u32, item_id: u32) -> Vec<(u64, Option)> { + let item = Item::::get(collection_id, item_id).unwrap(); + let s: Vec<_> = item.approvals.into_iter().collect(); + s +} + fn events() -> Vec> { let result = System::events() .into_iter() @@ -180,7 +186,7 @@ fn transfer_should_work() { assert_eq!(items(), vec![(3, 0, 42)]); assert_noop!(Nfts::transfer(Origin::signed(2), 0, 42, 4), Error::::NoPermission); - assert_ok!(Nfts::approve_transfer(Origin::signed(3), 0, 42, 2)); + assert_ok!(Nfts::approve_transfer(Origin::signed(3), 0, 42, 2, None)); assert_ok!(Nfts::transfer(Origin::signed(2), 0, 42, 4)); }); } @@ -535,12 +541,12 @@ fn approval_lifecycle_works() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); assert_ok!(Nfts::transfer(Origin::signed(3), 0, 42, 4)); assert_noop!(Nfts::transfer(Origin::signed(3), 0, 42, 3), Error::::NoPermission); - assert!(Item::::get(0, 42).unwrap().approved.is_none()); + assert!(Item::::get(0, 42).unwrap().approvals.is_empty()); - assert_ok!(Nfts::approve_transfer(Origin::signed(4), 0, 42, 2)); + assert_ok!(Nfts::approve_transfer(Origin::signed(4), 0, 42, 2, None)); assert_ok!(Nfts::transfer(Origin::signed(2), 0, 42, 2)); }); } @@ -551,29 +557,107 @@ fn cancel_approval_works() { assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); assert_noop!( - Nfts::cancel_approval(Origin::signed(2), 1, 42, None), + Nfts::cancel_approval(Origin::signed(2), 1, 42, 3), Error::::UnknownCollection ); assert_noop!( - 
Nfts::cancel_approval(Origin::signed(2), 0, 43, None), + Nfts::cancel_approval(Origin::signed(2), 0, 43, 3), Error::::UnknownCollection ); assert_noop!( - Nfts::cancel_approval(Origin::signed(3), 0, 42, None), + Nfts::cancel_approval(Origin::signed(3), 0, 42, 3), Error::::NoPermission ); assert_noop!( - Nfts::cancel_approval(Origin::signed(2), 0, 42, Some(4)), - Error::::WrongDelegate + Nfts::cancel_approval(Origin::signed(2), 0, 42, 4), + Error::::NotDelegate ); - assert_ok!(Nfts::cancel_approval(Origin::signed(2), 0, 42, Some(3))); + assert_ok!(Nfts::cancel_approval(Origin::signed(2), 0, 42, 3)); + assert_noop!( + Nfts::cancel_approval(Origin::signed(2), 0, 42, 3), + Error::::NotDelegate + ); + + let current_block = 1; + System::set_block_number(current_block); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 69, 2)); + // approval expires after 2 blocks. + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, Some(2))); assert_noop!( - Nfts::cancel_approval(Origin::signed(2), 0, 42, None), - Error::::NoDelegate + Nfts::cancel_approval(Origin::signed(5), 0, 42, 3), + Error::::NoPermission ); + + System::set_block_number(current_block + 3); + // 5 can cancel the approval since the deadline has passed. 
+ assert_ok!(Nfts::cancel_approval(Origin::signed(5), 0, 42, 3)); + assert_eq!(approvals(0, 69), vec![]); + }); +} + +#[test] +fn approving_multiple_accounts_works() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + + let current_block = 1; + System::set_block_number(current_block); + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 4, None)); + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 5, Some(2))); + assert_eq!(approvals(0, 42), vec![(3, None), (4, None), (5, Some(current_block + 2))]); + + assert_ok!(Nfts::transfer(Origin::signed(4), 0, 42, 6)); + assert_noop!(Nfts::transfer(Origin::signed(3), 0, 42, 7), Error::::NoPermission); + assert_noop!(Nfts::transfer(Origin::signed(5), 0, 42, 8), Error::::NoPermission); + }); +} + +#[test] +fn approvals_limit_works() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + + for i in 3..13 { + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, i, None)); + } + // the limit is 10 + assert_noop!( + Nfts::approve_transfer(Origin::signed(2), 0, 42, 14, None), + Error::::ReachedApprovalLimit + ); + }); +} + +#[test] +fn approval_deadline_works() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert!(System::block_number().is_zero()); + + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + + // the approval expires after the 2nd block. 
+ assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, Some(2))); + + System::set_block_number(3); + assert_noop!(Nfts::transfer(Origin::signed(3), 0, 42, 4), Error::::ApprovalExpired); + System::set_block_number(1); + assert_ok!(Nfts::transfer(Origin::signed(3), 0, 42, 4)); + + assert_eq!(System::block_number(), 1); + // make a new approval with a deadline after 4 blocks, so it will expire after the 5th + // block. + assert_ok!(Nfts::approve_transfer(Origin::signed(4), 0, 42, 6, Some(4))); + // this should still work. + System::set_block_number(5); + assert_ok!(Nfts::transfer(Origin::signed(6), 0, 42, 5)); }); } @@ -583,24 +667,24 @@ fn cancel_approval_works_with_admin() { assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); assert_noop!( - Nfts::cancel_approval(Origin::signed(1), 1, 42, None), + Nfts::cancel_approval(Origin::signed(1), 1, 42, 1), Error::::UnknownCollection ); assert_noop!( - Nfts::cancel_approval(Origin::signed(1), 0, 43, None), + Nfts::cancel_approval(Origin::signed(1), 0, 43, 1), Error::::UnknownCollection ); assert_noop!( - Nfts::cancel_approval(Origin::signed(1), 0, 42, Some(4)), - Error::::WrongDelegate + Nfts::cancel_approval(Origin::signed(1), 0, 42, 4), + Error::::NotDelegate ); - assert_ok!(Nfts::cancel_approval(Origin::signed(1), 0, 42, Some(3))); + assert_ok!(Nfts::cancel_approval(Origin::signed(1), 0, 42, 3)); assert_noop!( - Nfts::cancel_approval(Origin::signed(1), 0, 42, None), - Error::::NoDelegate + Nfts::cancel_approval(Origin::signed(1), 0, 42, 1), + Error::::NotDelegate ); }); } @@ -611,22 +695,47 @@ fn cancel_approval_works_with_force() { assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3)); + 
assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); assert_noop!( - Nfts::cancel_approval(Origin::root(), 1, 42, None), + Nfts::cancel_approval(Origin::root(), 1, 42, 1), Error::::UnknownCollection ); assert_noop!( - Nfts::cancel_approval(Origin::root(), 0, 43, None), + Nfts::cancel_approval(Origin::root(), 0, 43, 1), Error::::UnknownCollection ); + assert_noop!(Nfts::cancel_approval(Origin::root(), 0, 42, 4), Error::::NotDelegate); + + assert_ok!(Nfts::cancel_approval(Origin::root(), 0, 42, 3)); + assert_noop!(Nfts::cancel_approval(Origin::root(), 0, 42, 1), Error::::NotDelegate); + }); +} + +#[test] +fn clear_all_transfer_approvals_works() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); + assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 4, None)); + assert_noop!( - Nfts::cancel_approval(Origin::root(), 0, 42, Some(4)), - Error::::WrongDelegate + Nfts::clear_all_transfer_approvals(Origin::signed(3), 0, 42), + Error::::NoPermission ); - assert_ok!(Nfts::cancel_approval(Origin::root(), 0, 42, Some(3))); - assert_noop!(Nfts::cancel_approval(Origin::root(), 0, 42, None), Error::::NoDelegate); + assert_ok!(Nfts::clear_all_transfer_approvals(Origin::signed(2), 0, 42)); + + assert!(events().contains(&Event::::AllApprovalsCancelled { + collection: 0, + item: 42, + owner: 2, + })); + assert_eq!(approvals(0, 42), vec![]); + + assert_noop!(Nfts::transfer(Origin::signed(3), 0, 42, 5), Error::::NoPermission); + assert_noop!(Nfts::transfer(Origin::signed(4), 0, 42, 5), Error::::NoPermission); }); } diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index 1081ec8110288..db1c351c4a9c5 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -29,7 +29,7 @@ pub(super) type DepositBalanceOf = pub(super) type CollectionDetailsFor = 
CollectionDetails<::AccountId, DepositBalanceOf>; pub(super) type ItemDetailsFor = - ItemDetails<::AccountId, DepositBalanceOf>; + ItemDetails<::AccountId, DepositBalanceOf, ApprovalsOf>; pub(super) type ItemPrice = <>::Currency as Currency<::AccountId>>::Balance; @@ -84,11 +84,11 @@ impl CollectionDetails { /// Information concerning the ownership of a single unique item. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] -pub struct ItemDetails { +pub struct ItemDetails { /// The owner of this item. pub(super) owner: AccountId, /// The approved transferrer of this item, if one is set. - pub(super) approved: Option, + pub(super) approvals: Approvals, /// Whether the item can be transferred or not. pub(super) is_frozen: bool, /// The amount held in the pallet's default account for this item. Free-hold items will have diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index de518b8286665..2c90ab54fe9fb 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -18,12 +18,12 @@ //! Autogenerated weights for pallet_nfts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-07-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `test-bench-bot`, CPU: `Intel(R) Xeon(R) CPU @ 3.10GHz` +//! DATE: 2022-09-09, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet // --steps=50 @@ -68,6 +68,7 @@ pub trait WeightInfo { fn clear_collection_metadata() -> Weight; fn approve_transfer() -> Weight; fn cancel_approval() -> Weight; + fn clear_all_transfer_approvals() -> Weight; fn set_accept_ownership() -> Weight; fn set_collection_max_supply() -> Weight; fn set_price() -> Weight; @@ -77,39 +78,39 @@ pub trait WeightInfo { /// Weights for pallet_nfts using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques ClassAccount (r:0 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ClassAccount (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(33_075_000 as u64) + Weight::from_ref_time(33_018_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques ClassAccount (r:0 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ClassAccount (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(19_528_000 as u64) + Weight::from_ref_time(20_957_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques Asset (r:1 w:0) - // Storage: Uniques ClassAccount (r:0 w:1) - // Storage: Uniques Attribute (r:0 w:1000) - // Storage: Uniques ClassMetadataOf (r:0 w:1) - // Storage: Uniques InstanceMetadataOf (r:0 w:1000) - // Storage: Uniques CollectionMaxSupply (r:0 w:1) - // Storage: Uniques Account (r:0 w:20) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Asset (r:1 w:0) + // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts Attribute (r:0 
w:1000) + // Storage: Nfts ClassMetadataOf (r:0 w:1) + // Storage: Nfts InstanceMetadataOf (r:0 w:1000) + // Storage: Nfts CollectionMaxSupply (r:0 w:1) + // Storage: Nfts Account (r:0 w:20) /// The range of component `n` is `[0, 1000]`. /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(13_639_000 as u64).saturating_mul(n as u64)) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_393_000 as u64).saturating_mul(m as u64)) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_217_000 as u64).saturating_mul(a as u64)) + // Standard Error: 12_000 + .saturating_add(Weight::from_ref_time(10_091_000 as u64).saturating_mul(n as u64)) + // Standard Error: 12_000 + .saturating_add(Weight::from_ref_time(1_748_000 as u64).saturating_mul(m as u64)) + // Standard Error: 12_000 + .saturating_add(Weight::from_ref_time(1_621_000 as u64).saturating_mul(a as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) @@ -117,176 +118,183 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(m as u64))) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(a as u64))) } - // Storage: Uniques Asset (r:1 w:1) - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques CollectionMaxSupply (r:1 w:0) - // Storage: Uniques Account (r:0 w:1) + // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts CollectionMaxSupply (r:1 w:0) + // Storage: Nfts Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(42_146_000 as u64) + Weight::from_ref_time(43_007_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) 
.saturating_add(T::DbWeight::get().writes(3 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques Asset (r:1 w:1) - // Storage: Uniques Account (r:0 w:1) - // Storage: Uniques ItemPriceOf (r:0 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Account (r:0 w:1) + // Storage: Nfts ItemPriceOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(42_960_000 as u64) + Weight::from_ref_time(43_922_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } - // Storage: Uniques Class (r:1 w:0) - // Storage: Uniques Asset (r:1 w:1) - // Storage: Uniques Account (r:0 w:2) - // Storage: Uniques ItemPriceOf (r:0 w:1) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Account (r:0 w:2) + // Storage: Nfts ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(33_025_000 as u64) + Weight::from_ref_time(33_951_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques Asset (r:100 w:100) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Asset (r:102 w:102) /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 24_000 - .saturating_add(Weight::from_ref_time(15_540_000 as u64).saturating_mul(i as u64)) + // Standard Error: 12_000 + .saturating_add(Weight::from_ref_time(11_194_000 as u64).saturating_mul(i as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } - // Storage: Uniques Asset (r:1 w:1) - // Storage: Uniques Class (r:1 w:0) + // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) fn freeze() -> Weight { - Weight::from_ref_time(25_194_000 as u64) + Weight::from_ref_time(26_745_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Uniques Asset (r:1 w:1) - // Storage: Uniques Class (r:1 w:0) + // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) fn thaw() -> Weight { - Weight::from_ref_time(25_397_000 as u64) + Weight::from_ref_time(27_466_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Uniques Class (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) fn freeze_collection() -> Weight { - Weight::from_ref_time(19_278_000 as u64) + Weight::from_ref_time(22_591_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Uniques Class (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) fn thaw_collection() -> Weight { - Weight::from_ref_time(19_304_000 as u64) + Weight::from_ref_time(22_392_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Uniques OwnershipAcceptance (r:1 w:1) - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques 
ClassAccount (r:0 w:2) + // Storage: Nfts OwnershipAcceptance (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(28_615_000 as u64) + Weight::from_ref_time(31_202_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } - // Storage: Uniques Class (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(19_943_000 as u64) + Weight::from_ref_time(23_063_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques ClassAccount (r:0 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - Weight::from_ref_time(22_583_000 as u64) + Weight::from_ref_time(25_598_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques InstanceMetadataOf (r:1 w:0) - // Storage: Uniques Attribute (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts InstanceMetadataOf (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(47_520_000 as u64) + Weight::from_ref_time(48_684_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques InstanceMetadataOf (r:1 w:0) - // Storage: Uniques Attribute (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts InstanceMetadataOf (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:1) fn clear_attribute() -> Weight { - Weight::from_ref_time(45_316_000 as u64) + Weight::from_ref_time(47_267_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) 
.saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques InstanceMetadataOf (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - Weight::from_ref_time(38_391_000 as u64) + Weight::from_ref_time(40_174_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques InstanceMetadataOf (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(38_023_000 as u64) + Weight::from_ref_time(41_611_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques ClassMetadataOf (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(37_398_000 as u64) + Weight::from_ref_time(40_073_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:0) - // Storage: Uniques ClassMetadataOf (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(35_621_000 as u64) + Weight::from_ref_time(38_191_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Uniques Class (r:1 w:0) - // Storage: Uniques Asset (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Asset (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(25_856_000 as u64) + Weight::from_ref_time(29_461_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) 
.saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Uniques Class (r:1 w:0) - // Storage: Uniques Asset (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Asset (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(26_098_000 as u64) + Weight::from_ref_time(29_690_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Uniques OwnershipAcceptance (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Asset (r:1 w:1) + fn clear_all_transfer_approvals() -> Weight { + Weight::from_ref_time(27_758_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + } + // Storage: Nfts OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(24_076_000 as u64) + Weight::from_ref_time(26_425_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Uniques CollectionMaxSupply (r:1 w:1) - // Storage: Uniques Class (r:1 w:0) + // Storage: Nfts CollectionMaxSupply (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(22_035_000 as u64) + Weight::from_ref_time(24_533_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Uniques Asset (r:1 w:0) - // Storage: Uniques ItemPriceOf (r:0 w:1) + // Storage: Nfts Asset (r:1 w:0) + // Storage: Nfts ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(22_534_000 as u64) + Weight::from_ref_time(24_745_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Uniques Asset (r:1 w:1) - // Storage: Uniques ItemPriceOf (r:1 w:1) - // Storage: Uniques Class (r:1 w:0) - // Storage: Uniques Account (r:0 w:2) + // Storage: Nfts 
Asset (r:1 w:1) + // Storage: Nfts ItemPriceOf (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Account (r:0 w:2) fn buy_item() -> Weight { - Weight::from_ref_time(45_272_000 as u64) + Weight::from_ref_time(47_967_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -294,39 +302,39 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques ClassAccount (r:0 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ClassAccount (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(33_075_000 as u64) + Weight::from_ref_time(33_018_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques ClassAccount (r:0 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ClassAccount (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(19_528_000 as u64) + Weight::from_ref_time(20_957_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques Asset (r:1 w:0) - // Storage: Uniques ClassAccount (r:0 w:1) - // Storage: Uniques Attribute (r:0 w:1000) - // Storage: Uniques ClassMetadataOf (r:0 w:1) - // Storage: Uniques InstanceMetadataOf (r:0 w:1000) - // Storage: Uniques CollectionMaxSupply (r:0 w:1) - // Storage: Uniques Account (r:0 w:20) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Asset (r:1 w:0) + // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts Attribute (r:0 w:1000) + // Storage: Nfts ClassMetadataOf (r:0 w:1) + // Storage: Nfts InstanceMetadataOf (r:0 w:1000) + // Storage: Nfts CollectionMaxSupply (r:0 w:1) + // Storage: Nfts Account (r:0 w:20) /// The range of component `n` is `[0, 1000]`. 
/// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(13_639_000 as u64).saturating_mul(n as u64)) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_393_000 as u64).saturating_mul(m as u64)) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_217_000 as u64).saturating_mul(a as u64)) + // Standard Error: 12_000 + .saturating_add(Weight::from_ref_time(10_091_000 as u64).saturating_mul(n as u64)) + // Standard Error: 12_000 + .saturating_add(Weight::from_ref_time(1_748_000 as u64).saturating_mul(m as u64)) + // Standard Error: 12_000 + .saturating_add(Weight::from_ref_time(1_621_000 as u64).saturating_mul(a as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -334,176 +342,183 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(m as u64))) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(a as u64))) } - // Storage: Uniques Asset (r:1 w:1) - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques CollectionMaxSupply (r:1 w:0) - // Storage: Uniques Account (r:0 w:1) + // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts CollectionMaxSupply (r:1 w:0) + // Storage: Nfts Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(42_146_000 as u64) + Weight::from_ref_time(43_007_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques Asset (r:1 w:1) - // Storage: Uniques Account (r:0 w:1) - // Storage: Uniques ItemPriceOf (r:0 w:1) + // Storage: Nfts Class (r:1 w:1) + // 
Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Account (r:0 w:1) + // Storage: Nfts ItemPriceOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(42_960_000 as u64) + Weight::from_ref_time(43_922_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } - // Storage: Uniques Class (r:1 w:0) - // Storage: Uniques Asset (r:1 w:1) - // Storage: Uniques Account (r:0 w:2) - // Storage: Uniques ItemPriceOf (r:0 w:1) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Account (r:0 w:2) + // Storage: Nfts ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(33_025_000 as u64) + Weight::from_ref_time(33_951_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques Asset (r:100 w:100) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Asset (r:102 w:102) /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 24_000 - .saturating_add(Weight::from_ref_time(15_540_000 as u64).saturating_mul(i as u64)) + // Standard Error: 12_000 + .saturating_add(Weight::from_ref_time(11_194_000 as u64).saturating_mul(i as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } - // Storage: Uniques Asset (r:1 w:1) - // Storage: Uniques Class (r:1 w:0) + // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) fn freeze() -> Weight { - Weight::from_ref_time(25_194_000 as u64) + Weight::from_ref_time(26_745_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Uniques Asset (r:1 w:1) - // Storage: Uniques Class (r:1 w:0) + // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) fn thaw() -> Weight { - Weight::from_ref_time(25_397_000 as u64) + Weight::from_ref_time(27_466_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Uniques Class (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) fn freeze_collection() -> Weight { - Weight::from_ref_time(19_278_000 as u64) + Weight::from_ref_time(22_591_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Uniques Class (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) fn thaw_collection() -> Weight { - Weight::from_ref_time(19_304_000 as u64) + Weight::from_ref_time(22_392_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Uniques OwnershipAcceptance (r:1 w:1) - // Storage: Uniques Class (r:1 w:1) - 
// Storage: Uniques ClassAccount (r:0 w:2) + // Storage: Nfts OwnershipAcceptance (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(28_615_000 as u64) + Weight::from_ref_time(31_202_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } - // Storage: Uniques Class (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(19_943_000 as u64) + Weight::from_ref_time(23_063_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques ClassAccount (r:0 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - Weight::from_ref_time(22_583_000 as u64) + Weight::from_ref_time(25_598_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques InstanceMetadataOf (r:1 w:0) - // Storage: Uniques Attribute (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts InstanceMetadataOf (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(47_520_000 as u64) + Weight::from_ref_time(48_684_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques InstanceMetadataOf (r:1 w:0) - // Storage: Uniques Attribute (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts InstanceMetadataOf (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:1) fn clear_attribute() -> Weight { - Weight::from_ref_time(45_316_000 as u64) + Weight::from_ref_time(47_267_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) 
.saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques InstanceMetadataOf (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - Weight::from_ref_time(38_391_000 as u64) + Weight::from_ref_time(40_174_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques InstanceMetadataOf (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(38_023_000 as u64) + Weight::from_ref_time(41_611_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques ClassMetadataOf (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(37_398_000 as u64) + Weight::from_ref_time(40_073_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Uniques Class (r:1 w:0) - // Storage: Uniques ClassMetadataOf (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(35_621_000 as u64) + Weight::from_ref_time(38_191_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Uniques Class (r:1 w:0) - // Storage: Uniques Asset (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Asset (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(25_856_000 as u64) + Weight::from_ref_time(29_461_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) 
.saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Uniques Class (r:1 w:0) - // Storage: Uniques Asset (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Asset (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(26_098_000 as u64) + Weight::from_ref_time(29_690_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + } + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Asset (r:1 w:1) + fn clear_all_transfer_approvals() -> Weight { + Weight::from_ref_time(27_758_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Uniques OwnershipAcceptance (r:1 w:1) + // Storage: Nfts OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(24_076_000 as u64) + Weight::from_ref_time(26_425_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Uniques CollectionMaxSupply (r:1 w:1) - // Storage: Uniques Class (r:1 w:0) + // Storage: Nfts CollectionMaxSupply (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(22_035_000 as u64) + Weight::from_ref_time(24_533_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Uniques Asset (r:1 w:0) - // Storage: Uniques ItemPriceOf (r:0 w:1) + // Storage: Nfts Asset (r:1 w:0) + // Storage: Nfts ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(22_534_000 as u64) + Weight::from_ref_time(24_745_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Uniques Asset (r:1 w:1) - // Storage: Uniques ItemPriceOf (r:1 w:1) - // Storage: Uniques Class (r:1 w:0) - // Storage: Uniques Account (r:0 
w:2) + // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts ItemPriceOf (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Account (r:0 w:2) fn buy_item() -> Weight { - Weight::from_ref_time(45_272_000 as u64) + Weight::from_ref_time(47_967_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } From 4930d817d057f936d993860199223f9001043435 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 15 Sep 2022 13:23:17 +0800 Subject: [PATCH 005/101] Fixes --- frame/nfts/src/lib.rs | 2 +- frame/nfts/src/mock.rs | 8 ++++---- frame/nfts/src/tests.rs | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 14691c21a0ef2..26ab16871e879 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -90,7 +90,7 @@ pub mod pallet { /// The module configuration trait. pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Identifier for the collection of item. 
type CollectionId: Member + Parameter + MaxEncodedLen + Copy; diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs index ad7a94b3eed50..bfa6c185ed78c 100644 --- a/frame/nfts/src/mock.rs +++ b/frame/nfts/src/mock.rs @@ -50,7 +50,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -58,7 +58,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -75,7 +75,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -85,7 +85,7 @@ impl pallet_balances::Config for Test { } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type CollectionId = u32; type ItemId = u32; type Currency = Balances; diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 9fb29d0e95c26..19d24f4924d46 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -80,7 +80,7 @@ fn events() -> Vec> { let result = System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let mock::Event::Nfts(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let mock::RuntimeEvent::Nfts(inner) = e { Some(inner) } else { None }) .collect::>(); System::reset_events(); @@ -933,7 +933,7 @@ fn buy_item_should_work() { // freeze collection assert_ok!(Nfts::freeze_collection(Origin::signed(user_1), collection_id)); - let buy_item_call = mock::Call::Nfts(crate::Call::::buy_item { + let buy_item_call = mock::RuntimeCall::Nfts(crate::Call::::buy_item { collection: 
collection_id, item: item_3, bid_price: price_1, @@ -945,7 +945,7 @@ fn buy_item_should_work() { // freeze item assert_ok!(Nfts::freeze(Origin::signed(user_1), collection_id, item_3)); - let buy_item_call = mock::Call::Nfts(crate::Call::::buy_item { + let buy_item_call = mock::RuntimeCall::Nfts(crate::Call::::buy_item { collection: collection_id, item: item_3, bid_price: price_1, From f369ba95b223d4fd908bd44cbc20e12d7dfc3448 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 15 Sep 2022 13:25:39 +0800 Subject: [PATCH 006/101] cargo fmt --- frame/nfts/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 26ab16871e879..4297752348d43 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -90,7 +90,8 @@ pub mod pallet { /// The module configuration trait. pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// Identifier for the collection of item. 
type CollectionId: Member + Parameter + MaxEncodedLen + Copy; From ae5561efbc06dc73bb80e6d873694d0edac2b67e Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 15 Sep 2022 14:22:47 +0800 Subject: [PATCH 007/101] Fixes --- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 5d4bbf3ba244f..5e2e7141508e3 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1482,7 +1482,7 @@ impl pallet_uniques::Config for Runtime { } impl pallet_nfts::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type CollectionId = u32; type ItemId = u32; type Currency = Balances; From f34b0ffffc1e4f6b57b97dedd11778f1c0beee62 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 15 Sep 2022 14:37:38 +0800 Subject: [PATCH 008/101] Fixes --- frame/nfts/src/benchmarking.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index ca38851222e8d..7368446e593df 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -126,9 +126,9 @@ fn add_item_attribute, I: 'static>( (key, caller, caller_lookup) } -fn assert_last_event, I: 'static>(generic_event: >::Event) { +fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { let events = frame_system::Pallet::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::RuntimeEvent = generic_event.into(); // compare to the last event record let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; assert_eq!(event, &system_event); From d09df86f269db78ab622032f4f4e9152776f7770 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 19 Sep 2022 17:45:28 +0300 Subject: [PATCH 009/101] Fix CI --- frame/nfts/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 4297752348d43..cdb098d2eceed 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -109,9 +109,9 @@ pub mod pallet { /// Standard collection creation is only allowed if the origin attempting it and the /// collection are in this set. type CreateOrigin: EnsureOriginWithArg< - Success = Self::AccountId, Self::Origin, Self::CollectionId, + Success = Self::AccountId, >; /// Locker trait to enable Locking mechanism downstream. From 3ab3356471c61ed9a5c92ffb166264d80ba2c785 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Fri, 23 Sep 2022 10:37:15 +0200 Subject: [PATCH 010/101] Nfts: Fix Auto-Increment (#12223) * commit * passing benchmarks * clean up * sync * runtime implementation * fix * fmt * fix benchmark * cfg * remove try-increment-id * remove unused error * impl Incrementable for unsigned types * clean up * fix in tests * not needed anymore * Use OptionQuery Co-authored-by: Keith Yeung --- frame/nfts/src/benchmarking.rs | 13 +++----- frame/nfts/src/functions.rs | 15 ++++++++++ frame/nfts/src/lib.rs | 48 ++++++++++++++++++++++++------ frame/nfts/src/tests.rs | 54 +++++++++++++++++----------------- 4 files changed, 85 insertions(+), 45 deletions(-) diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index 7368446e593df..eca404df2f142 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -42,13 +42,8 @@ fn create_collection, I: 'static>( let caller_lookup = T::Lookup::unlookup(caller.clone()); let collection = T::Helper::collection(0); T::Currency::make_free_balance_be(&caller, 
DepositBalanceOf::::max_value()); - assert!(Nfts::::force_create( - SystemOrigin::Root.into(), - collection, - caller_lookup.clone(), - false, - ) - .is_ok()); + assert!(Nfts::::force_create(SystemOrigin::Root.into(), caller_lookup.clone(), false,) + .is_ok()); (collection, caller, caller_lookup) } @@ -142,7 +137,7 @@ benchmarks_instance_pallet! { whitelist_account!(caller); let admin = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - let call = Call::::create { collection, admin }; + let call = Call::::create { admin }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_last_event::(Event::Created { collection: T::Helper::collection(0), creator: caller.clone(), owner: caller }.into()); @@ -151,7 +146,7 @@ benchmarks_instance_pallet! { force_create { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - }: _(SystemOrigin::Root, T::Helper::collection(0), caller_lookup, true) + }: _(SystemOrigin::Root, caller_lookup, true) verify { assert_last_event::(Event::ForceCreated { collection: T::Helper::collection(0), owner: caller }.into()); } diff --git a/frame/nfts/src/functions.rs b/frame/nfts/src/functions.rs index 27ab752dbabf6..f935a5b2eba90 100644 --- a/frame/nfts/src/functions.rs +++ b/frame/nfts/src/functions.rs @@ -94,7 +94,12 @@ impl, I: 'static> Pallet { }, ); + let next_id = collection.increment(); + CollectionAccount::::insert(&owner, &collection, ()); + NextCollectionId::::set(Some(next_id)); + + Self::deposit_event(Event::NextCollectionIdIncremented { next_id }); Self::deposit_event(event); Ok(()) } @@ -284,4 +289,14 @@ impl, I: 'static> Pallet { Ok(()) } + + #[cfg(any(test, feature = "runtime-benchmarks"))] + pub fn set_next_id(id: T::CollectionId) { + NextCollectionId::::set(Some(id)); + } + + #[cfg(test)] + pub fn get_next_id() -> T::CollectionId { + NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()) + } } 
diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index cdb098d2eceed..2cde6e5e1ab5d 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -61,6 +61,29 @@ pub use weights::WeightInfo; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +pub trait Incrementable { + fn increment(&self) -> Self; + fn initial_value() -> Self; +} + +macro_rules! impl_incrementable { + ($($type:ty),+) => { + $( + impl Incrementable for $type { + fn increment(&self) -> Self { + self.saturating_add(1) + } + + fn initial_value() -> Self { + 0 + } + } + )+ + }; +} + +impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); + #[frame_support::pallet] pub mod pallet { use super::*; @@ -94,7 +117,7 @@ pub mod pallet { + IsType<::RuntimeEvent>; /// Identifier for the collection of item. - type CollectionId: Member + Parameter + MaxEncodedLen + Copy; + type CollectionId: Member + Parameter + MaxEncodedLen + Copy + Incrementable; /// The type used to identify a unique item within a collection. type ItemId: Member + Parameter + MaxEncodedLen + Copy; @@ -278,6 +301,12 @@ pub mod pallet { pub(super) type CollectionMaxSupply, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::CollectionId, u32, OptionQuery>; + #[pallet::storage] + /// Stores the `CollectionId` that is going to be used for the next collection. + /// This gets incremented by 1 whenever a new collection is created. + pub(super) type NextCollectionId, I: 'static = ()> = + StorageValue<_, T::CollectionId, OptionQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { @@ -372,6 +401,8 @@ pub mod pallet { OwnershipAcceptanceChanged { who: T::AccountId, maybe_collection: Option }, /// Max supply has been set for a collection. CollectionMaxSupplySet { collection: T::CollectionId, max_supply: u32 }, + /// Event gets emmited when the `NextCollectionId` gets incremented. 
+ NextCollectionIdIncremented { next_id: T::CollectionId }, /// The price was set for the instance. ItemPriceSet { collection: T::CollectionId, @@ -458,7 +489,6 @@ pub mod pallet { /// `ItemDeposit` funds of sender are reserved. /// /// Parameters: - /// - `collection`: The identifier of the new collection. This must not be currently in use. /// - `admin`: The admin of this collection. The admin is the initial address of each /// member of the collection's admin team. /// @@ -466,11 +496,10 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::create())] - pub fn create( - origin: OriginFor, - collection: T::CollectionId, - admin: AccountIdLookupOf, - ) -> DispatchResult { + pub fn create(origin: OriginFor, admin: AccountIdLookupOf) -> DispatchResult { + let collection = + NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()); + let owner = T::CreateOrigin::ensure_origin(origin, &collection)?; let admin = T::Lookup::lookup(admin)?; @@ -492,7 +521,6 @@ pub mod pallet { /// /// Unlike `create`, no funds are reserved. /// - /// - `collection`: The identifier of the new item. This must not be currently in use. /// - `owner`: The owner of this collection of items. 
The owner has full superuser /// permissions /// over this item, but may later change and configure the permissions using @@ -504,13 +532,15 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_create())] pub fn force_create( origin: OriginFor, - collection: T::CollectionId, owner: AccountIdLookupOf, free_holding: bool, ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; let owner = T::Lookup::lookup(owner)?; + let collection = + NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()); + Self::do_create_collection( collection, owner.clone(), diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 19d24f4924d46..f5d44bef7f886 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -98,12 +98,12 @@ fn basic_setup_works() { #[test] fn basic_minting_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_eq!(collections(), vec![(1, 0)]); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); assert_eq!(items(), vec![(1, 0, 42)]); - assert_ok!(Nfts::force_create(Origin::root(), 1, 2, true)); + assert_ok!(Nfts::force_create(Origin::root(), 2, true)); assert_eq!(collections(), vec![(1, 0), (2, 1)]); assert_ok!(Nfts::mint(Origin::signed(2), 1, 69, 1)); assert_eq!(items(), vec![(1, 0, 42), (1, 1, 69)]); @@ -114,7 +114,7 @@ fn basic_minting_should_work() { fn lifecycle_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::create(Origin::signed(1), 0, 1)); + assert_ok!(Nfts::create(Origin::signed(1), 1)); assert_eq!(Balances::reserved_balance(&1), 2); assert_eq!(collections(), vec![(1, 0)]); assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0, 0], false)); @@ -157,7 +157,7 @@ fn lifecycle_should_work() { fn destroy_with_bad_witness_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - 
assert_ok!(Nfts::create(Origin::signed(1), 0, 1)); + assert_ok!(Nfts::create(Origin::signed(1), 1)); let w = Collection::::get(0).unwrap().destroy_witness(); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); @@ -168,7 +168,7 @@ fn destroy_with_bad_witness_should_not_work() { #[test] fn mint_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); assert_eq!(Nfts::owner(0, 42).unwrap(), 1); assert_eq!(collections(), vec![(1, 0)]); @@ -179,7 +179,7 @@ fn mint_should_work() { #[test] fn transfer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Nfts::transfer(Origin::signed(2), 0, 42, 3)); @@ -194,7 +194,7 @@ fn transfer_should_work() { #[test] fn freezing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); assert_ok!(Nfts::freeze(Origin::signed(1), 0, 42)); assert_noop!(Nfts::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); @@ -211,7 +211,7 @@ fn freezing_should_work() { #[test] fn origin_guards_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); Balances::make_free_balance_be(&2, 100); @@ -236,7 +236,7 @@ fn transfer_owner_should_work() { Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); Balances::make_free_balance_be(&3, 100); - assert_ok!(Nfts::create(Origin::signed(1), 0, 1)); + assert_ok!(Nfts::create(Origin::signed(1), 1)); 
assert_eq!(collections(), vec![(1, 0)]); assert_noop!(Nfts::transfer_ownership(Origin::signed(1), 0, 2), Error::::Unaccepted); assert_ok!(Nfts::set_accept_ownership(Origin::signed(2), Some(0))); @@ -275,7 +275,7 @@ fn transfer_owner_should_work() { #[test] fn set_team_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::set_team(Origin::signed(1), 0, 2, 3, 4)); assert_ok!(Nfts::mint(Origin::signed(2), 0, 42, 2)); @@ -294,7 +294,7 @@ fn set_collection_metadata_should_work() { Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false), Error::::UnknownCollection, ); - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Nfts::force_create(Origin::root(), 1, false)); // Cannot add metadata to unowned item assert_noop!( Nfts::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false), @@ -351,7 +351,7 @@ fn set_item_metadata_should_work() { Balances::make_free_balance_be(&1, 30); // Cannot add metadata to unknown item - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Nfts::force_create(Origin::root(), 1, false)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); // Cannot add metadata to unowned item assert_noop!( @@ -404,7 +404,7 @@ fn set_attribute_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Nfts::force_create(Origin::root(), 1, false)); assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); @@ -449,7 +449,7 @@ fn set_attribute_should_respect_freeze() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, false)); + 
assert_ok!(Nfts::force_create(Origin::root(), 1, false)); assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); @@ -481,7 +481,7 @@ fn force_item_status_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Nfts::force_create(Origin::root(), 1, false)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 69, 2)); assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); @@ -515,7 +515,7 @@ fn force_item_status_should_work() { fn burn_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Nfts::force_create(Origin::root(), 1, false)); assert_ok!(Nfts::set_team(Origin::signed(1), 0, 2, 3, 4)); assert_noop!( @@ -539,7 +539,7 @@ fn burn_works() { #[test] fn approval_lifecycle_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); assert_ok!(Nfts::transfer(Origin::signed(3), 0, 42, 4)); @@ -554,7 +554,7 @@ fn approval_lifecycle_works() { #[test] fn cancel_approval_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); @@ -601,7 +601,7 @@ fn cancel_approval_works() { #[test] fn approving_multiple_accounts_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + 
assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); let current_block = 1; @@ -620,7 +620,7 @@ fn approving_multiple_accounts_works() { #[test] fn approvals_limit_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); for i in 3..13 { @@ -640,7 +640,7 @@ fn approval_deadline_works() { System::set_block_number(0); assert!(System::block_number().is_zero()); - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); // the approval expires after the 2nd block. @@ -664,7 +664,7 @@ fn approval_deadline_works() { #[test] fn cancel_approval_works_with_admin() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); @@ -692,7 +692,7 @@ fn cancel_approval_works_with_admin() { #[test] fn cancel_approval_works_with_force() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); @@ -714,7 +714,7 @@ fn cancel_approval_works_with_force() { #[test] fn clear_all_transfer_approvals_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Nfts::force_create(Origin::root(), 1, true)); assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); @@ -747,7 +747,7 @@ fn max_supply_should_work() { 
let max_supply = 2; // validate set_collection_max_supply - assert_ok!(Nfts::force_create(Origin::root(), collection_id, user_id, true)); + assert_ok!(Nfts::force_create(Origin::root(), user_id, true)); assert!(!CollectionMaxSupply::::contains_key(collection_id)); assert_ok!(Nfts::set_collection_max_supply( @@ -793,7 +793,7 @@ fn set_price_should_work() { let item_1 = 1; let item_2 = 2; - assert_ok!(Nfts::force_create(Origin::root(), collection_id, user_id, true)); + assert_ok!(Nfts::force_create(Origin::root(), user_id, true)); assert_ok!(Nfts::mint(Origin::signed(user_id), collection_id, item_1, user_id)); assert_ok!(Nfts::mint(Origin::signed(user_id), collection_id, item_2, user_id)); @@ -851,7 +851,7 @@ fn buy_item_should_work() { Balances::make_free_balance_be(&user_2, initial_balance); Balances::make_free_balance_be(&user_3, initial_balance); - assert_ok!(Nfts::force_create(Origin::root(), collection_id, user_1, true)); + assert_ok!(Nfts::force_create(Origin::root(), user_1, true)); assert_ok!(Nfts::mint(Origin::signed(user_1), collection_id, item_1, user_1)); assert_ok!(Nfts::mint(Origin::signed(user_1), collection_id, item_2, user_1)); From 6434da8420cade31594db3aea83ea08decf29e0b Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Sat, 24 Sep 2022 10:29:38 +0300 Subject: [PATCH 011/101] Rename Origin to RuntimeOrigin --- frame/nfts/Cargo.toml | 2 +- frame/nfts/src/lib.rs | 4 +- frame/nfts/src/mock.rs | 2 +- frame/nfts/src/tests.rs | 558 ++++++++++++++++++++++++---------------- 4 files changed, 336 insertions(+), 230 deletions(-) diff --git a/frame/nfts/Cargo.toml b/frame/nfts/Cargo.toml index 7f1ce4ff416b0..5e1a3d79d3a65 100644 --- a/frame/nfts/Cargo.toml +++ b/frame/nfts/Cargo.toml @@ -32,7 +32,7 @@ sp-std = { version = "4.0.0", path = "../../primitives/std" } default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/nfts/src/lib.rs 
b/frame/nfts/src/lib.rs index 2cde6e5e1ab5d..ddd76aba024ef 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -127,12 +127,12 @@ pub mod pallet { /// The origin which may forcibly create or destroy an item or otherwise alter privileged /// attributes. - type ForceOrigin: EnsureOrigin; + type ForceOrigin: EnsureOrigin; /// Standard collection creation is only allowed if the origin attempting it and the /// collection are in this set. type CreateOrigin: EnsureOriginWithArg< - Self::Origin, + Self::RuntimeOrigin, Self::CollectionId, Success = Self::AccountId, >; diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs index bfa6c185ed78c..8492291bfad05 100644 --- a/frame/nfts/src/mock.rs +++ b/frame/nfts/src/mock.rs @@ -49,7 +49,7 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index f5d44bef7f886..ac08c35b23294 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -98,14 +98,14 @@ fn basic_setup_works() { #[test] fn basic_minting_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); assert_eq!(collections(), vec![(1, 0)]); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); assert_eq!(items(), vec![(1, 0, 42)]); - assert_ok!(Nfts::force_create(Origin::root(), 2, true)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 2, true)); assert_eq!(collections(), vec![(1, 0), (2, 1)]); - assert_ok!(Nfts::mint(Origin::signed(2), 1, 69, 1)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 1, 69, 1)); assert_eq!(items(), vec![(1, 0, 42), (1, 1, 69)]); }); } @@ 
-114,32 +114,32 @@ fn basic_minting_should_work() { fn lifecycle_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::create(Origin::signed(1), 1)); + assert_ok!(Nfts::create(RuntimeOrigin::signed(1), 1)); assert_eq!(Balances::reserved_balance(&1), 2); assert_eq!(collections(), vec![(1, 0)]); - assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0, 0], false)); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0, 0], false)); assert_eq!(Balances::reserved_balance(&1), 5); assert!(CollectionMetadataOf::::contains_key(0)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 10)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 10)); assert_eq!(Balances::reserved_balance(&1), 6); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 69, 20)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 69, 20)); assert_eq!(Balances::reserved_balance(&1), 7); assert_eq!(items(), vec![(10, 0, 42), (20, 0, 69)]); assert_eq!(Collection::::get(0).unwrap().items, 2); assert_eq!(Collection::::get(0).unwrap().item_metadatas, 0); - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![42, 42], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![42, 42], false)); assert_eq!(Balances::reserved_balance(&1), 10); assert!(ItemMetadataOf::::contains_key(0, 42)); - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 69, bvec![69, 69], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![69, 69], false)); assert_eq!(Balances::reserved_balance(&1), 13); assert!(ItemMetadataOf::::contains_key(0, 69)); let w = Collection::::get(0).unwrap().destroy_witness(); assert_eq!(w.items, 2); assert_eq!(w.item_metadatas, 2); - assert_ok!(Nfts::destroy(Origin::signed(1), 0, w)); + assert_ok!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w)); assert_eq!(Balances::reserved_balance(&1), 0); assert!(!Collection::::contains_key(0)); @@ -157,19 +157,19 @@ 
fn lifecycle_should_work() { fn destroy_with_bad_witness_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::create(Origin::signed(1), 1)); + assert_ok!(Nfts::create(RuntimeOrigin::signed(1), 1)); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); - assert_noop!(Nfts::destroy(Origin::signed(1), 0, w), Error::::BadWitness); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_noop!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w), Error::::BadWitness); }); } #[test] fn mint_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); assert_eq!(Nfts::owner(0, 42).unwrap(), 1); assert_eq!(collections(), vec![(1, 0)]); assert_eq!(items(), vec![(1, 0, 42)]); @@ -179,54 +179,63 @@ fn mint_should_work() { #[test] fn transfer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); - assert_ok!(Nfts::transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(2), 0, 42, 3)); assert_eq!(items(), vec![(3, 0, 42)]); - assert_noop!(Nfts::transfer(Origin::signed(2), 0, 42, 4), Error::::NoPermission); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(2), 0, 42, 4), + Error::::NoPermission + ); - assert_ok!(Nfts::approve_transfer(Origin::signed(3), 0, 42, 2, None)); - assert_ok!(Nfts::transfer(Origin::signed(2), 0, 42, 4)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(3), 0, 42, 2, None)); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(2), 
0, 42, 4)); }); } #[test] fn freezing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); - assert_ok!(Nfts::freeze(Origin::signed(1), 0, 42)); - assert_noop!(Nfts::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::freeze(RuntimeOrigin::signed(1), 0, 42)); + assert_noop!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::Frozen); - assert_ok!(Nfts::thaw(Origin::signed(1), 0, 42)); - assert_ok!(Nfts::freeze_collection(Origin::signed(1), 0)); - assert_noop!(Nfts::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); + assert_ok!(Nfts::thaw(RuntimeOrigin::signed(1), 0, 42)); + assert_ok!(Nfts::freeze_collection(RuntimeOrigin::signed(1), 0)); + assert_noop!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::Frozen); - assert_ok!(Nfts::thaw_collection(Origin::signed(1), 0)); - assert_ok!(Nfts::transfer(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::thaw_collection(RuntimeOrigin::signed(1), 0)); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2)); }); } #[test] fn origin_guards_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); Balances::make_free_balance_be(&2, 100); - assert_ok!(Nfts::set_accept_ownership(Origin::signed(2), Some(0))); + assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(2), Some(0))); + assert_noop!( + Nfts::transfer_ownership(RuntimeOrigin::signed(2), 0, 2), + Error::::NoPermission + ); + assert_noop!( + Nfts::set_team(RuntimeOrigin::signed(2), 0, 2, 2, 2), + Error::::NoPermission + ); + 
assert_noop!(Nfts::freeze(RuntimeOrigin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!(Nfts::thaw(RuntimeOrigin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!(Nfts::mint(RuntimeOrigin::signed(2), 0, 69, 2), Error::::NoPermission); assert_noop!( - Nfts::transfer_ownership(Origin::signed(2), 0, 2), + Nfts::burn(RuntimeOrigin::signed(2), 0, 42, None), Error::::NoPermission ); - assert_noop!(Nfts::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); - assert_noop!(Nfts::freeze(Origin::signed(2), 0, 42), Error::::NoPermission); - assert_noop!(Nfts::thaw(Origin::signed(2), 0, 42), Error::::NoPermission); - assert_noop!(Nfts::mint(Origin::signed(2), 0, 69, 2), Error::::NoPermission); - assert_noop!(Nfts::burn(Origin::signed(2), 0, 42, None), Error::::NoPermission); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_noop!(Nfts::destroy(Origin::signed(2), 0, w), Error::::NoPermission); + assert_noop!(Nfts::destroy(RuntimeOrigin::signed(2), 0, w), Error::::NoPermission); }); } @@ -236,11 +245,14 @@ fn transfer_owner_should_work() { Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); Balances::make_free_balance_be(&3, 100); - assert_ok!(Nfts::create(Origin::signed(1), 1)); + assert_ok!(Nfts::create(RuntimeOrigin::signed(1), 1)); assert_eq!(collections(), vec![(1, 0)]); - assert_noop!(Nfts::transfer_ownership(Origin::signed(1), 0, 2), Error::::Unaccepted); - assert_ok!(Nfts::set_accept_ownership(Origin::signed(2), Some(0))); - assert_ok!(Nfts::transfer_ownership(Origin::signed(1), 0, 2)); + assert_noop!( + Nfts::transfer_ownership(RuntimeOrigin::signed(1), 0, 2), + Error::::Unaccepted + ); + assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(2), Some(0))); + assert_ok!(Nfts::transfer_ownership(RuntimeOrigin::signed(1), 0, 2)); assert_eq!(collections(), vec![(2, 0)]); assert_eq!(Balances::total_balance(&1), 98); @@ -248,18 +260,23 @@ fn transfer_owner_should_work() { 
assert_eq!(Balances::reserved_balance(&1), 0); assert_eq!(Balances::reserved_balance(&2), 2); - assert_ok!(Nfts::set_accept_ownership(Origin::signed(1), Some(0))); + assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(1), Some(0))); assert_noop!( - Nfts::transfer_ownership(Origin::signed(1), 0, 1), + Nfts::transfer_ownership(RuntimeOrigin::signed(1), 0, 1), Error::::NoPermission ); // Mint and set metadata now and make sure that deposit gets transferred back. - assert_ok!(Nfts::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); - assert_ok!(Nfts::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false)); - assert_ok!(Nfts::set_accept_ownership(Origin::signed(3), Some(0))); - assert_ok!(Nfts::transfer_ownership(Origin::signed(2), 0, 3)); + assert_ok!(Nfts::set_collection_metadata( + RuntimeOrigin::signed(2), + 0, + bvec![0u8; 20], + false + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20], false)); + assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(3), Some(0))); + assert_ok!(Nfts::transfer_ownership(RuntimeOrigin::signed(2), 0, 3)); assert_eq!(collections(), vec![(3, 0)]); assert_eq!(Balances::total_balance(&2), 57); assert_eq!(Balances::total_balance(&3), 145); @@ -268,21 +285,24 @@ fn transfer_owner_should_work() { // 2's acceptence from before is reset when it became owner, so it cannot be transfered // without a fresh acceptance. 
- assert_noop!(Nfts::transfer_ownership(Origin::signed(3), 0, 2), Error::::Unaccepted); + assert_noop!( + Nfts::transfer_ownership(RuntimeOrigin::signed(3), 0, 2), + Error::::Unaccepted + ); }); } #[test] fn set_team_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::set_team(Origin::signed(1), 0, 2, 3, 4)); - - assert_ok!(Nfts::mint(Origin::signed(2), 0, 42, 2)); - assert_ok!(Nfts::freeze(Origin::signed(4), 0, 42)); - assert_ok!(Nfts::thaw(Origin::signed(3), 0, 42)); - assert_ok!(Nfts::transfer(Origin::signed(3), 0, 42, 3)); - assert_ok!(Nfts::burn(Origin::signed(3), 0, 42, None)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); + + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 42, 2)); + assert_ok!(Nfts::freeze(RuntimeOrigin::signed(4), 0, 42)); + assert_ok!(Nfts::thaw(RuntimeOrigin::signed(3), 0, 42)); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 3)); + assert_ok!(Nfts::burn(RuntimeOrigin::signed(3), 0, 42, None)); }); } @@ -291,56 +311,79 @@ fn set_collection_metadata_should_work() { new_test_ext().execute_with(|| { // Cannot add metadata to unknown item assert_noop!( - Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false), + Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 20], false), Error::::UnknownCollection, ); - assert_ok!(Nfts::force_create(Origin::root(), 1, false)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, false)); // Cannot add metadata to unowned item assert_noop!( - Nfts::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false), + Nfts::set_collection_metadata(RuntimeOrigin::signed(2), 0, bvec![0u8; 20], false), Error::::NoPermission, ); // Successfully add metadata and take deposit Balances::make_free_balance_be(&1, 30); - assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 
20], false)); + assert_ok!(Nfts::set_collection_metadata( + RuntimeOrigin::signed(1), + 0, + bvec![0u8; 20], + false + )); assert_eq!(Balances::free_balance(&1), 9); assert!(CollectionMetadataOf::::contains_key(0)); // Force origin works, too. - assert_ok!(Nfts::set_collection_metadata(Origin::root(), 0, bvec![0u8; 18], false)); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::root(), 0, bvec![0u8; 18], false)); // Update deposit - assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], false)); + assert_ok!(Nfts::set_collection_metadata( + RuntimeOrigin::signed(1), + 0, + bvec![0u8; 15], + false + )); assert_eq!(Balances::free_balance(&1), 14); - assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 25], false)); + assert_ok!(Nfts::set_collection_metadata( + RuntimeOrigin::signed(1), + 0, + bvec![0u8; 25], + false + )); assert_eq!(Balances::free_balance(&1), 4); // Cannot over-reserve assert_noop!( - Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 40], false), + Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 40], false), BalancesError::::InsufficientBalance, ); // Can't set or clear metadata once frozen - assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], true)); + assert_ok!(Nfts::set_collection_metadata( + RuntimeOrigin::signed(1), + 0, + bvec![0u8; 15], + true + )); assert_noop!( - Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], false), + Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 15], false), Error::::Frozen, ); - assert_noop!(Nfts::clear_collection_metadata(Origin::signed(1), 0), Error::::Frozen); + assert_noop!( + Nfts::clear_collection_metadata(RuntimeOrigin::signed(1), 0), + Error::::Frozen + ); // Clear Metadata - assert_ok!(Nfts::set_collection_metadata(Origin::root(), 0, bvec![0u8; 15], false)); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::root(), 0, bvec![0u8; 15], false)); 
assert_noop!( - Nfts::clear_collection_metadata(Origin::signed(2), 0), + Nfts::clear_collection_metadata(RuntimeOrigin::signed(2), 0), Error::::NoPermission ); assert_noop!( - Nfts::clear_collection_metadata(Origin::signed(1), 1), + Nfts::clear_collection_metadata(RuntimeOrigin::signed(1), 1), Error::::UnknownCollection ); - assert_ok!(Nfts::clear_collection_metadata(Origin::signed(1), 0)); + assert_ok!(Nfts::clear_collection_metadata(RuntimeOrigin::signed(1), 0)); assert!(!CollectionMetadataOf::::contains_key(0)); }); } @@ -351,50 +394,53 @@ fn set_item_metadata_should_work() { Balances::make_free_balance_be(&1, 30); // Cannot add metadata to unknown item - assert_ok!(Nfts::force_create(Origin::root(), 1, false)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, false)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); // Cannot add metadata to unowned item assert_noop!( - Nfts::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false), + Nfts::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20], false), Error::::NoPermission, ); // Successfully add metadata and take deposit - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 20], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 20], false)); assert_eq!(Balances::free_balance(&1), 8); assert!(ItemMetadataOf::::contains_key(0, 42)); // Force origin works, too. 
- assert_ok!(Nfts::set_metadata(Origin::root(), 0, 42, bvec![0u8; 18], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::root(), 0, 42, bvec![0u8; 18], false)); // Update deposit - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15], false)); assert_eq!(Balances::free_balance(&1), 13); - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 25], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 25], false)); assert_eq!(Balances::free_balance(&1), 3); // Cannot over-reserve assert_noop!( - Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 40], false), + Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 40], false), BalancesError::::InsufficientBalance, ); // Can't set or clear metadata once frozen - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], true)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15], true)); assert_noop!( - Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false), + Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15], false), Error::::Frozen, ); - assert_noop!(Nfts::clear_metadata(Origin::signed(1), 0, 42), Error::::Frozen); + assert_noop!(Nfts::clear_metadata(RuntimeOrigin::signed(1), 0, 42), Error::::Frozen); // Clear Metadata - assert_ok!(Nfts::set_metadata(Origin::root(), 0, 42, bvec![0u8; 15], false)); - assert_noop!(Nfts::clear_metadata(Origin::signed(2), 0, 42), Error::::NoPermission); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::root(), 0, 42, bvec![0u8; 15], false)); assert_noop!( - Nfts::clear_metadata(Origin::signed(1), 1, 42), + Nfts::clear_metadata(RuntimeOrigin::signed(2), 0, 42), + Error::::NoPermission + ); + assert_noop!( + Nfts::clear_metadata(RuntimeOrigin::signed(1), 1, 42), Error::::UnknownCollection ); - assert_ok!(Nfts::clear_metadata(Origin::signed(1), 0, 42)); + 
assert_ok!(Nfts::clear_metadata(RuntimeOrigin::signed(1), 0, 42)); assert!(!ItemMetadataOf::::contains_key(0, 42)); }); } @@ -404,11 +450,11 @@ fn set_attribute_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::force_create(Origin::root(), 1, false)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, false)); - assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); - assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); - assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(0), bvec![1], bvec![0])); + assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0])); + assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![0])); + assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![1], bvec![0])); assert_eq!( attributes(0), vec![ @@ -419,7 +465,7 @@ fn set_attribute_should_work() { ); assert_eq!(Balances::reserved_balance(1), 9); - assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0; 10])); + assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0; 10])); assert_eq!( attributes(0), vec![ @@ -430,7 +476,7 @@ fn set_attribute_should_work() { ); assert_eq!(Balances::reserved_balance(1), 18); - assert_ok!(Nfts::clear_attribute(Origin::signed(1), 0, Some(0), bvec![1])); + assert_ok!(Nfts::clear_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![1])); assert_eq!( attributes(0), vec![(None, bvec![0], bvec![0; 10]), (Some(0), bvec![0], bvec![0]),] @@ -438,7 +484,7 @@ fn set_attribute_should_work() { assert_eq!(Balances::reserved_balance(1), 15); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_ok!(Nfts::destroy(Origin::signed(1), 0, w)); + assert_ok!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w)); assert_eq!(attributes(0), vec![]); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -449,11 +495,11 @@ 
fn set_attribute_should_respect_freeze() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::force_create(Origin::root(), 1, false)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, false)); - assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); - assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); - assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![0])); + assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0])); + assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![0])); + assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(1), bvec![0], bvec![0])); assert_eq!( attributes(0), vec![ @@ -464,15 +510,18 @@ fn set_attribute_should_respect_freeze() { ); assert_eq!(Balances::reserved_balance(1), 9); - assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![], true)); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![], true)); let e = Error::::Frozen; - assert_noop!(Nfts::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0]), e); - assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1])); + assert_noop!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0]), e); + assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![1])); - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 0, bvec![], true)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 0, bvec![], true)); let e = Error::::Frozen; - assert_noop!(Nfts::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1]), e); - assert_ok!(Nfts::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![1])); + assert_noop!( + Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![1]), + e + ); + 
assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(1), bvec![0], bvec![1])); }); } @@ -481,32 +530,32 @@ fn force_item_status_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::force_create(Origin::root(), 1, false)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 1)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 69, 2)); - assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, false)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 69, 2)); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0; 20], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 65); // force item status to be free holding - assert_ok!(Nfts::force_item_status(Origin::root(), 0, 1, 1, 1, 1, true, false)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 142, 1)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 169, 2)); - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 142, bvec![0; 20], false)); - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 169, bvec![0; 20], false)); + assert_ok!(Nfts::force_item_status(RuntimeOrigin::root(), 0, 1, 1, 1, 1, true, false)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 142, 1)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 169, 2)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 142, bvec![0; 20], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 169, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 65); 
- assert_ok!(Nfts::redeposit(Origin::signed(1), 0, bvec![0, 42, 50, 69, 100])); + assert_ok!(Nfts::redeposit(RuntimeOrigin::signed(1), 0, bvec![0, 42, 50, 69, 100])); assert_eq!(Balances::reserved_balance(1), 63); - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 42); - assert_ok!(Nfts::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 21); - assert_ok!(Nfts::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 0); }); } @@ -515,23 +564,29 @@ fn force_item_status_should_work() { fn burn_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::force_create(Origin::root(), 1, false)); - assert_ok!(Nfts::set_team(Origin::signed(1), 0, 2, 3, 4)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, false)); + assert_ok!(Nfts::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); assert_noop!( - Nfts::burn(Origin::signed(5), 0, 42, Some(5)), + Nfts::burn(RuntimeOrigin::signed(5), 0, 42, Some(5)), Error::::UnknownCollection ); - assert_ok!(Nfts::mint(Origin::signed(2), 0, 42, 5)); - assert_ok!(Nfts::mint(Origin::signed(2), 0, 69, 5)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 42, 5)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 69, 5)); assert_eq!(Balances::reserved_balance(1), 2); - assert_noop!(Nfts::burn(Origin::signed(0), 0, 42, None), Error::::NoPermission); - assert_noop!(Nfts::burn(Origin::signed(5), 0, 42, Some(6)), Error::::WrongOwner); + assert_noop!( + Nfts::burn(RuntimeOrigin::signed(0), 0, 42, None), + Error::::NoPermission + ); + assert_noop!( + 
Nfts::burn(RuntimeOrigin::signed(5), 0, 42, Some(6)), + Error::::WrongOwner + ); - assert_ok!(Nfts::burn(Origin::signed(5), 0, 42, Some(5))); - assert_ok!(Nfts::burn(Origin::signed(3), 0, 69, Some(5))); + assert_ok!(Nfts::burn(RuntimeOrigin::signed(5), 0, 42, Some(5))); + assert_ok!(Nfts::burn(RuntimeOrigin::signed(3), 0, 69, Some(5))); assert_eq!(Balances::reserved_balance(1), 0); }); } @@ -539,61 +594,64 @@ fn burn_works() { #[test] fn approval_lifecycle_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); - assert_ok!(Nfts::transfer(Origin::signed(3), 0, 42, 4)); - assert_noop!(Nfts::transfer(Origin::signed(3), 0, 42, 3), Error::::NoPermission); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 4)); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 3), + Error::::NoPermission + ); assert!(Item::::get(0, 42).unwrap().approvals.is_empty()); - assert_ok!(Nfts::approve_transfer(Origin::signed(4), 0, 42, 2, None)); - assert_ok!(Nfts::transfer(Origin::signed(2), 0, 42, 2)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(4), 0, 42, 2, None)); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(2), 0, 42, 2)); }); } #[test] fn cancel_approval_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 
42, 3, None)); assert_noop!( - Nfts::cancel_approval(Origin::signed(2), 1, 42, 3), + Nfts::cancel_approval(RuntimeOrigin::signed(2), 1, 42, 3), Error::::UnknownCollection ); assert_noop!( - Nfts::cancel_approval(Origin::signed(2), 0, 43, 3), + Nfts::cancel_approval(RuntimeOrigin::signed(2), 0, 43, 3), Error::::UnknownCollection ); assert_noop!( - Nfts::cancel_approval(Origin::signed(3), 0, 42, 3), + Nfts::cancel_approval(RuntimeOrigin::signed(3), 0, 42, 3), Error::::NoPermission ); assert_noop!( - Nfts::cancel_approval(Origin::signed(2), 0, 42, 4), + Nfts::cancel_approval(RuntimeOrigin::signed(2), 0, 42, 4), Error::::NotDelegate ); - assert_ok!(Nfts::cancel_approval(Origin::signed(2), 0, 42, 3)); + assert_ok!(Nfts::cancel_approval(RuntimeOrigin::signed(2), 0, 42, 3)); assert_noop!( - Nfts::cancel_approval(Origin::signed(2), 0, 42, 3), + Nfts::cancel_approval(RuntimeOrigin::signed(2), 0, 42, 3), Error::::NotDelegate ); let current_block = 1; System::set_block_number(current_block); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 69, 2)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 69, 2)); // approval expires after 2 blocks. - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, Some(2))); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, Some(2))); assert_noop!( - Nfts::cancel_approval(Origin::signed(5), 0, 42, 3), + Nfts::cancel_approval(RuntimeOrigin::signed(5), 0, 42, 3), Error::::NoPermission ); System::set_block_number(current_block + 3); // 5 can cancel the approval since the deadline has passed. 
- assert_ok!(Nfts::cancel_approval(Origin::signed(5), 0, 42, 3)); + assert_ok!(Nfts::cancel_approval(RuntimeOrigin::signed(5), 0, 42, 3)); assert_eq!(approvals(0, 69), vec![]); }); } @@ -601,34 +659,40 @@ fn cancel_approval_works() { #[test] fn approving_multiple_accounts_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); let current_block = 1; System::set_block_number(current_block); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 4, None)); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 5, Some(2))); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 4, None)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 5, Some(2))); assert_eq!(approvals(0, 42), vec![(3, None), (4, None), (5, Some(current_block + 2))]); - assert_ok!(Nfts::transfer(Origin::signed(4), 0, 42, 6)); - assert_noop!(Nfts::transfer(Origin::signed(3), 0, 42, 7), Error::::NoPermission); - assert_noop!(Nfts::transfer(Origin::signed(5), 0, 42, 8), Error::::NoPermission); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(4), 0, 42, 6)); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 7), + Error::::NoPermission + ); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(5), 0, 42, 8), + Error::::NoPermission + ); }); } #[test] fn approvals_limit_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); for i in 3..13 
{ - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, i, None)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, i, None)); } // the limit is 10 assert_noop!( - Nfts::approve_transfer(Origin::signed(2), 0, 42, 14, None), + Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 14, None), Error::::ReachedApprovalLimit ); }); @@ -640,50 +704,53 @@ fn approval_deadline_works() { System::set_block_number(0); assert!(System::block_number().is_zero()); - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); // the approval expires after the 2nd block. - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, Some(2))); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, Some(2))); System::set_block_number(3); - assert_noop!(Nfts::transfer(Origin::signed(3), 0, 42, 4), Error::::ApprovalExpired); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 4), + Error::::ApprovalExpired + ); System::set_block_number(1); - assert_ok!(Nfts::transfer(Origin::signed(3), 0, 42, 4)); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 4)); assert_eq!(System::block_number(), 1); // make a new approval with a deadline after 4 blocks, so it will expire after the 5th // block. - assert_ok!(Nfts::approve_transfer(Origin::signed(4), 0, 42, 6, Some(4))); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(4), 0, 42, 6, Some(4))); // this should still work. 
System::set_block_number(5); - assert_ok!(Nfts::transfer(Origin::signed(6), 0, 42, 5)); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(6), 0, 42, 5)); }); } #[test] fn cancel_approval_works_with_admin() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_noop!( - Nfts::cancel_approval(Origin::signed(1), 1, 42, 1), + Nfts::cancel_approval(RuntimeOrigin::signed(1), 1, 42, 1), Error::::UnknownCollection ); assert_noop!( - Nfts::cancel_approval(Origin::signed(1), 0, 43, 1), + Nfts::cancel_approval(RuntimeOrigin::signed(1), 0, 43, 1), Error::::UnknownCollection ); assert_noop!( - Nfts::cancel_approval(Origin::signed(1), 0, 42, 4), + Nfts::cancel_approval(RuntimeOrigin::signed(1), 0, 42, 4), Error::::NotDelegate ); - assert_ok!(Nfts::cancel_approval(Origin::signed(1), 0, 42, 3)); + assert_ok!(Nfts::cancel_approval(RuntimeOrigin::signed(1), 0, 42, 3)); assert_noop!( - Nfts::cancel_approval(Origin::signed(1), 0, 42, 1), + Nfts::cancel_approval(RuntimeOrigin::signed(1), 0, 42, 1), Error::::NotDelegate ); }); @@ -692,40 +759,46 @@ fn cancel_approval_works_with_admin() { #[test] fn cancel_approval_works_with_force() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_noop!( - Nfts::cancel_approval(Origin::root(), 1, 
42, 1), + Nfts::cancel_approval(RuntimeOrigin::root(), 1, 42, 1), Error::::UnknownCollection ); assert_noop!( - Nfts::cancel_approval(Origin::root(), 0, 43, 1), + Nfts::cancel_approval(RuntimeOrigin::root(), 0, 43, 1), Error::::UnknownCollection ); - assert_noop!(Nfts::cancel_approval(Origin::root(), 0, 42, 4), Error::::NotDelegate); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::root(), 0, 42, 4), + Error::::NotDelegate + ); - assert_ok!(Nfts::cancel_approval(Origin::root(), 0, 42, 3)); - assert_noop!(Nfts::cancel_approval(Origin::root(), 0, 42, 1), Error::::NotDelegate); + assert_ok!(Nfts::cancel_approval(RuntimeOrigin::root(), 0, 42, 3)); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::root(), 0, 42, 1), + Error::::NotDelegate + ); }); } #[test] fn clear_all_transfer_approvals_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(Origin::root(), 1, true)); - assert_ok!(Nfts::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 3, None)); - assert_ok!(Nfts::approve_transfer(Origin::signed(2), 0, 42, 4, None)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 4, None)); assert_noop!( - Nfts::clear_all_transfer_approvals(Origin::signed(3), 0, 42), + Nfts::clear_all_transfer_approvals(RuntimeOrigin::signed(3), 0, 42), Error::::NoPermission ); - assert_ok!(Nfts::clear_all_transfer_approvals(Origin::signed(2), 0, 42)); + assert_ok!(Nfts::clear_all_transfer_approvals(RuntimeOrigin::signed(2), 0, 42)); assert!(events().contains(&Event::::AllApprovalsCancelled { collection: 0, @@ -734,8 +807,14 @@ fn clear_all_transfer_approvals_works() { })); assert_eq!(approvals(0, 42), vec![]); - assert_noop!(Nfts::transfer(Origin::signed(3), 0, 42, 5), Error::::NoPermission); - 
assert_noop!(Nfts::transfer(Origin::signed(4), 0, 42, 5), Error::::NoPermission); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 5), + Error::::NoPermission + ); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(4), 0, 42, 5), + Error::::NoPermission + ); }); } @@ -747,11 +826,11 @@ fn max_supply_should_work() { let max_supply = 2; // validate set_collection_max_supply - assert_ok!(Nfts::force_create(Origin::root(), user_id, true)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, true)); assert!(!CollectionMaxSupply::::contains_key(collection_id)); assert_ok!(Nfts::set_collection_max_supply( - Origin::signed(user_id), + RuntimeOrigin::signed(user_id), collection_id, max_supply )); @@ -763,21 +842,25 @@ fn max_supply_should_work() { })); assert_noop!( - Nfts::set_collection_max_supply(Origin::signed(user_id), collection_id, max_supply + 1), + Nfts::set_collection_max_supply( + RuntimeOrigin::signed(user_id), + collection_id, + max_supply + 1 + ), Error::::MaxSupplyAlreadySet ); // validate we can't mint more to max supply - assert_ok!(Nfts::mint(Origin::signed(user_id), collection_id, 0, user_id)); - assert_ok!(Nfts::mint(Origin::signed(user_id), collection_id, 1, user_id)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 0, user_id)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 1, user_id)); assert_noop!( - Nfts::mint(Origin::signed(user_id), collection_id, 2, user_id), + Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 2, user_id), Error::::MaxSupplyReached ); // validate we remove the CollectionMaxSupply record when we destroy the collection assert_ok!(Nfts::destroy( - Origin::signed(user_id), + RuntimeOrigin::signed(user_id), collection_id, Collection::::get(collection_id).unwrap().destroy_witness() )); @@ -793,15 +876,21 @@ fn set_price_should_work() { let item_1 = 1; let item_2 = 2; - assert_ok!(Nfts::force_create(Origin::root(), user_id, true)); + 
assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, true)); - assert_ok!(Nfts::mint(Origin::signed(user_id), collection_id, item_1, user_id)); - assert_ok!(Nfts::mint(Origin::signed(user_id), collection_id, item_2, user_id)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_1, user_id)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_2, user_id)); - assert_ok!(Nfts::set_price(Origin::signed(user_id), collection_id, item_1, Some(1), None,)); + assert_ok!(Nfts::set_price( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + Some(1), + None, + )); assert_ok!(Nfts::set_price( - Origin::signed(user_id), + RuntimeOrigin::signed(user_id), collection_id, item_2, Some(2), @@ -824,7 +913,13 @@ fn set_price_should_work() { })); // validate we can unset the price - assert_ok!(Nfts::set_price(Origin::signed(user_id), collection_id, item_2, None, None)); + assert_ok!(Nfts::set_price( + RuntimeOrigin::signed(user_id), + collection_id, + item_2, + None, + None + )); assert!(events().contains(&Event::::ItemPriceRemoved { collection: collection_id, item: item_2 @@ -851,14 +946,14 @@ fn buy_item_should_work() { Balances::make_free_balance_be(&user_2, initial_balance); Balances::make_free_balance_be(&user_3, initial_balance); - assert_ok!(Nfts::force_create(Origin::root(), user_1, true)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, true)); - assert_ok!(Nfts::mint(Origin::signed(user_1), collection_id, item_1, user_1)); - assert_ok!(Nfts::mint(Origin::signed(user_1), collection_id, item_2, user_1)); - assert_ok!(Nfts::mint(Origin::signed(user_1), collection_id, item_3, user_1)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_1, user_1)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_2, user_1)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_3, user_1)); assert_ok!(Nfts::set_price( - Origin::signed(user_1), + 
RuntimeOrigin::signed(user_1), collection_id, item_1, Some(price_1), @@ -866,7 +961,7 @@ fn buy_item_should_work() { )); assert_ok!(Nfts::set_price( - Origin::signed(user_1), + RuntimeOrigin::signed(user_1), collection_id, item_2, Some(price_2), @@ -875,12 +970,17 @@ fn buy_item_should_work() { // can't buy for less assert_noop!( - Nfts::buy_item(Origin::signed(user_2), collection_id, item_1, 1), + Nfts::buy_item(RuntimeOrigin::signed(user_2), collection_id, item_1, 1), Error::::BidTooLow ); // pass the higher price to validate it will still deduct correctly - assert_ok!(Nfts::buy_item(Origin::signed(user_2), collection_id, item_1, price_1 + 1,)); + assert_ok!(Nfts::buy_item( + RuntimeOrigin::signed(user_2), + collection_id, + item_1, + price_1 + 1, + )); // validate the new owner & balances let item = Item::::get(collection_id, item_1).unwrap(); @@ -890,18 +990,18 @@ fn buy_item_should_work() { // can't buy from yourself assert_noop!( - Nfts::buy_item(Origin::signed(user_1), collection_id, item_2, price_2), + Nfts::buy_item(RuntimeOrigin::signed(user_1), collection_id, item_2, price_2), Error::::NoPermission ); // can't buy when the item is listed for a specific buyer assert_noop!( - Nfts::buy_item(Origin::signed(user_2), collection_id, item_2, price_2), + Nfts::buy_item(RuntimeOrigin::signed(user_2), collection_id, item_2, price_2), Error::::NoPermission ); // can buy when I'm a whitelisted buyer - assert_ok!(Nfts::buy_item(Origin::signed(user_3), collection_id, item_2, price_2,)); + assert_ok!(Nfts::buy_item(RuntimeOrigin::signed(user_3), collection_id, item_2, price_2,)); assert!(events().contains(&Event::::ItemBought { collection: collection_id, @@ -916,14 +1016,14 @@ fn buy_item_should_work() { // can't buy when item is not for sale assert_noop!( - Nfts::buy_item(Origin::signed(user_2), collection_id, item_3, price_2), + Nfts::buy_item(RuntimeOrigin::signed(user_2), collection_id, item_3, price_2), Error::::NotForSale ); // ensure we can't buy an item when 
the collection or an item is frozen { assert_ok!(Nfts::set_price( - Origin::signed(user_1), + RuntimeOrigin::signed(user_1), collection_id, item_3, Some(price_1), @@ -931,26 +1031,32 @@ fn buy_item_should_work() { )); // freeze collection - assert_ok!(Nfts::freeze_collection(Origin::signed(user_1), collection_id)); + assert_ok!(Nfts::freeze_collection(RuntimeOrigin::signed(user_1), collection_id)); let buy_item_call = mock::RuntimeCall::Nfts(crate::Call::::buy_item { collection: collection_id, item: item_3, bid_price: price_1, }); - assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::::Frozen); + assert_noop!( + buy_item_call.dispatch(RuntimeOrigin::signed(user_2)), + Error::::Frozen + ); - assert_ok!(Nfts::thaw_collection(Origin::signed(user_1), collection_id)); + assert_ok!(Nfts::thaw_collection(RuntimeOrigin::signed(user_1), collection_id)); // freeze item - assert_ok!(Nfts::freeze(Origin::signed(user_1), collection_id, item_3)); + assert_ok!(Nfts::freeze(RuntimeOrigin::signed(user_1), collection_id, item_3)); let buy_item_call = mock::RuntimeCall::Nfts(crate::Call::::buy_item { collection: collection_id, item: item_3, bid_price: price_1, }); - assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::::Frozen); + assert_noop!( + buy_item_call.dispatch(RuntimeOrigin::signed(user_2)), + Error::::Frozen + ); } }); } From 61dee82b0c0085306eeb995c5dab4083c1a30aeb Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Sat, 24 Sep 2022 12:15:31 +0300 Subject: [PATCH 012/101] [Uniques V2] Tips (#12168) * Allow to add tips when buying an NFT * Chore * Rework tips feature * Add weights + benchmarks * Convert tuple to struct * Fix benchmark * ".git/.scripts/bench-bot.sh" pallet dev pallet_nfts * Update frame/nfts/src/benchmarking.rs Co-authored-by: Oliver Tale-Yazdi * Fix benchmarks * Revert the bounded_vec![] approach * ".git/.scripts/bench-bot.sh" pallet dev pallet_nfts Co-authored-by: command-bot <> 
Co-authored-by: Oliver Tale-Yazdi --- bin/node/runtime/src/lib.rs | 2 + frame/nfts/src/benchmarking.rs | 23 ++++ frame/nfts/src/features/buy_sell.rs | 42 +++++++ frame/nfts/src/features/mod.rs | 18 +++ frame/nfts/src/lib.rs | 30 +++++ frame/nfts/src/mock.rs | 1 + frame/nfts/src/tests.rs | 47 ++++++- frame/nfts/src/types.rs | 21 +++- frame/nfts/src/weights.rs | 187 +++++++++++++++------------- 9 files changed, 282 insertions(+), 89 deletions(-) create mode 100644 frame/nfts/src/features/buy_sell.rs create mode 100644 frame/nfts/src/features/mod.rs diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 7bb244ccd5732..eb6941e85215c 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1472,6 +1472,7 @@ parameter_types! { pub const KeyLimit: u32 = 32; pub const ValueLimit: u32 = 256; pub const ApprovalsLimit: u32 = 20; + pub const MaxTips: u32 = 10; } impl pallet_uniques::Config for Runtime { @@ -1510,6 +1511,7 @@ impl pallet_nfts::Config for Runtime { type KeyLimit = KeyLimit; type ValueLimit = ValueLimit; type ApprovalsLimit = ApprovalsLimit; + type MaxTips = MaxTips; type WeightInfo = pallet_nfts::weights::SubstrateWeight; #[cfg(feature = "runtime-benchmarks")] type Helper = (); diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index eca404df2f142..00527abc99e02 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -453,5 +453,28 @@ benchmarks_instance_pallet! { }.into()); } + pay_tips { + let n in 0 .. 
T::MaxTips::get() as u32; + let amount = BalanceOf::::from(100u32); + let caller: T::AccountId = whitelisted_caller(); + let collection = T::Helper::collection(0); + let item = T::Helper::item(0); + let tips: BoundedVec<_, _> = vec![ + ItemTip + { collection, item, receiver: caller.clone(), amount }; n as usize + ].try_into().unwrap(); + }: _(SystemOrigin::Signed(caller.clone()), tips) + verify { + if !n.is_zero() { + assert_last_event::(Event::TipSent { + collection, + item, + sender: caller.clone(), + receiver: caller.clone(), + amount, + }.into()); + } + } + impl_benchmark_test_suite!(Nfts, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/frame/nfts/src/features/buy_sell.rs b/frame/nfts/src/features/buy_sell.rs new file mode 100644 index 0000000000000..295d7fadfa8e6 --- /dev/null +++ b/frame/nfts/src/features/buy_sell.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::{ + pallet_prelude::*, + traits::{Currency, ExistenceRequirement::KeepAlive}, +}; + +impl, I: 'static> Pallet { + pub fn do_pay_tips( + sender: T::AccountId, + tips: BoundedVec, T::MaxTips>, + ) -> DispatchResult { + for tip in tips { + let ItemTip { collection, item, receiver, amount } = tip; + T::Currency::transfer(&sender, &receiver, amount, KeepAlive)?; + Self::deposit_event(Event::TipSent { + collection, + item, + sender: sender.clone(), + receiver, + amount, + }); + } + Ok(()) + } +} diff --git a/frame/nfts/src/features/mod.rs b/frame/nfts/src/features/mod.rs new file mode 100644 index 0000000000000..5661797978439 --- /dev/null +++ b/frame/nfts/src/features/mod.rs @@ -0,0 +1,18 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod buy_sell; diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index ddd76aba024ef..a4555cd655a08 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -35,6 +35,7 @@ pub mod mock; #[cfg(test)] mod tests; +mod features; mod functions; mod impl_nonfungibles; mod types; @@ -177,6 +178,10 @@ pub mod pallet { #[pallet::constant] type ApprovalsLimit: Get; + /// The max number of tips a user could send. + #[pallet::constant] + type MaxTips: Get; + #[cfg(feature = "runtime-benchmarks")] /// A set of helper functions for benchmarking. 
type Helper: BenchmarkHelper; @@ -420,6 +425,14 @@ pub mod pallet { seller: T::AccountId, buyer: T::AccountId, }, + /// A tip was sent. + TipSent { + collection: T::CollectionId, + item: T::ItemId, + sender: T::AccountId, + receiver: T::AccountId, + amount: DepositBalanceOf, + }, } #[pallet::error] @@ -1608,5 +1621,22 @@ pub mod pallet { let origin = ensure_signed(origin)?; Self::do_buy_item(collection, item, origin, bid_price) } + + /// Allows to pay the tips. + /// + /// Origin must be Signed. + /// + /// - `tips`: Tips array. + /// + /// Emits `TipSent` on every tip transfer. + #[pallet::weight(T::WeightInfo::pay_tips(tips.len() as u32))] + #[transactional] + pub fn pay_tips( + origin: OriginFor, + tips: BoundedVec, T::MaxTips>, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_pay_tips(origin, tips) + } } } diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs index 8492291bfad05..c13298aa8162d 100644 --- a/frame/nfts/src/mock.rs +++ b/frame/nfts/src/mock.rs @@ -101,6 +101,7 @@ impl Config for Test { type KeyLimit = ConstU32<50>; type ValueLimit = ConstU32<50>; type ApprovalsLimit = ConstU32<10>; + type MaxTips = ConstU32<10>; type WeightInfo = (); #[cfg(feature = "runtime-benchmarks")] type Helper = (); diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index ac08c35b23294..7004fb1f4e93b 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -1001,7 +1001,7 @@ fn buy_item_should_work() { ); // can buy when I'm a whitelisted buyer - assert_ok!(Nfts::buy_item(RuntimeOrigin::signed(user_3), collection_id, item_2, price_2,)); + assert_ok!(Nfts::buy_item(RuntimeOrigin::signed(user_3), collection_id, item_2, price_2)); assert!(events().contains(&Event::::ItemBought { collection: collection_id, @@ -1060,3 +1060,48 @@ fn buy_item_should_work() { } }); } + +#[test] +fn pay_tips_should_work() { + new_test_ext().execute_with(|| { + let user_1 = 1; + let user_2 = 2; + let user_3 = 3; + let collection_id = 0; + let 
item_id = 1; + let tip = 2; + let initial_balance = 100; + + Balances::make_free_balance_be(&user_1, initial_balance); + Balances::make_free_balance_be(&user_2, initial_balance); + Balances::make_free_balance_be(&user_3, initial_balance); + + assert_ok!(Nfts::pay_tips( + RuntimeOrigin::signed(user_1), + bvec![ + ItemTip { collection: collection_id, item: item_id, receiver: user_2, amount: tip }, + ItemTip { collection: collection_id, item: item_id, receiver: user_3, amount: tip }, + ] + )); + + assert_eq!(Balances::total_balance(&user_1), initial_balance - tip * 2); + assert_eq!(Balances::total_balance(&user_2), initial_balance + tip); + assert_eq!(Balances::total_balance(&user_3), initial_balance + tip); + + let events = events(); + assert!(events.contains(&Event::::TipSent { + collection: collection_id, + item: item_id, + sender: user_1, + receiver: user_2, + amount: tip, + })); + assert!(events.contains(&Event::::TipSent { + collection: collection_id, + item: item_id, + sender: user_1, + receiver: user_3, + amount: tip, + })); + }); +} diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index db1c351c4a9c5..e91c513e4c4c2 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -30,8 +30,15 @@ pub(super) type CollectionDetailsFor = CollectionDetails<::AccountId, DepositBalanceOf>; pub(super) type ItemDetailsFor = ItemDetails<::AccountId, DepositBalanceOf, ApprovalsOf>; -pub(super) type ItemPrice = +pub(super) type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; +pub(super) type ItemPrice = BalanceOf; +pub(super) type ItemTipOf = ItemTip< + >::CollectionId, + >::ItemId, + ::AccountId, + BalanceOf, +>; #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct CollectionDetails { @@ -127,3 +134,15 @@ pub struct ItemMetadata> { /// Whether the item metadata may be changed by a non Force origin. 
pub(super) is_frozen: bool, } + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct ItemTip { + /// A collection of the item. + pub(super) collection: CollectionId, + /// An item of which the tip is sent for. + pub(super) item: ItemId, + /// A receiver of the tip. + pub(super) receiver: AccountId, + /// An amount the sender is willing to tip. + pub(super) amount: Amount, +} diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index 2c90ab54fe9fb..1dab0838f32b2 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_nfts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-09, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-09-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -73,24 +73,27 @@ pub trait WeightInfo { fn set_collection_max_supply() -> Weight; fn set_price() -> Weight; fn buy_item() -> Weight; + fn pay_tips(n: u32, ) -> Weight; } /// Weights for pallet_nfts using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + // Storage: Nfts NextCollectionId (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(33_018_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + Weight::from_ref_time(37_627_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } + // Storage: Nfts NextCollectionId (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(20_957_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + Weight::from_ref_time(25_748_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts Asset (r:1 w:0) @@ -104,26 +107,24 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. 
fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(10_091_000 as u64).saturating_mul(n as u64)) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(1_748_000 as u64).saturating_mul(m as u64)) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(1_621_000 as u64).saturating_mul(a as u64)) + Weight::from_ref_time(2_449_817_000 as u64) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(8_423_500 as u64).saturating_mul(n as u64)) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(315_839 as u64).saturating_mul(m as u64)) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(217_497 as u64).saturating_mul(a as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(m as u64))) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(a as u64))) + .saturating_add(T::DbWeight::get().writes(2004 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(n as u64))) } // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts CollectionMaxSupply (r:1 w:0) // Storage: Nfts Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(43_007_000 as u64) + Weight::from_ref_time(43_014_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -132,7 +133,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Account (r:0 w:1) // Storage: Nfts ItemPriceOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(43_922_000 as u64) + Weight::from_ref_time(44_421_000 as u64) 
.saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -141,7 +142,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Account (r:0 w:2) // Storage: Nfts ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(33_951_000 as u64) + Weight::from_ref_time(34_315_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -149,9 +150,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Asset (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(11_194_000 as u64).saturating_mul(i as u64)) + Weight::from_ref_time(22_836_000 as u64) + // Standard Error: 9_131 + .saturating_add(Weight::from_ref_time(10_894_264 as u64).saturating_mul(i as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) @@ -160,26 +161,26 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn freeze() -> Weight { - Weight::from_ref_time(26_745_000 as u64) + Weight::from_ref_time(27_329_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn thaw() -> Weight { - Weight::from_ref_time(27_466_000 as u64) + Weight::from_ref_time(27_842_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) fn freeze_collection() -> Weight { - Weight::from_ref_time(22_591_000 as u64) + Weight::from_ref_time(23_129_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as 
u64)) } // Storage: Nfts Class (r:1 w:1) fn thaw_collection() -> Weight { - Weight::from_ref_time(22_392_000 as u64) + Weight::from_ref_time(22_584_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -187,20 +188,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(31_202_000 as u64) + Weight::from_ref_time(31_684_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Nfts Class (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(23_063_000 as u64) + Weight::from_ref_time(23_143_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - Weight::from_ref_time(25_598_000 as u64) + Weight::from_ref_time(25_684_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -208,7 +209,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts InstanceMetadataOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(48_684_000 as u64) + Weight::from_ref_time(50_159_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -216,76 +217,76 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts InstanceMetadataOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn clear_attribute() -> Weight { - Weight::from_ref_time(47_267_000 as u64) + Weight::from_ref_time(47_824_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn 
set_metadata() -> Weight { - Weight::from_ref_time(40_174_000 as u64) + Weight::from_ref_time(39_968_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(41_611_000 as u64) + Weight::from_ref_time(42_182_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(40_073_000 as u64) + Weight::from_ref_time(39_330_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(38_191_000 as u64) + Weight::from_ref_time(38_351_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(29_461_000 as u64) + Weight::from_ref_time(29_530_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(29_690_000 as u64) + Weight::from_ref_time(29_417_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn clear_all_transfer_approvals() -> Weight { - Weight::from_ref_time(27_758_000 as u64) + Weight::from_ref_time(28_482_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) 
.saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(26_425_000 as u64) + Weight::from_ref_time(25_851_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts CollectionMaxSupply (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(24_533_000 as u64) + Weight::from_ref_time(24_836_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Asset (r:1 w:0) // Storage: Nfts ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(24_745_000 as u64) + Weight::from_ref_time(25_665_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -294,27 +295,35 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Account (r:0 w:2) fn buy_item() -> Weight { - Weight::from_ref_time(47_967_000 as u64) + Weight::from_ref_time(47_502_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } + /// The range of component `n` is `[0, 10]`. 
+ fn pay_tips(n: u32, ) -> Weight { + Weight::from_ref_time(5_417_000 as u64) + // Standard Error: 32_526 + .saturating_add(Weight::from_ref_time(4_304_363 as u64).saturating_mul(n as u64)) + } } // For backwards compatibility and tests impl WeightInfo for () { + // Storage: Nfts NextCollectionId (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(33_018_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + Weight::from_ref_time(37_627_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } + // Storage: Nfts NextCollectionId (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(20_957_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + Weight::from_ref_time(25_748_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts Asset (r:1 w:0) @@ -328,26 +337,24 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. 
fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(10_091_000 as u64).saturating_mul(n as u64)) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(1_748_000 as u64).saturating_mul(m as u64)) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(1_621_000 as u64).saturating_mul(a as u64)) + Weight::from_ref_time(2_449_817_000 as u64) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(8_423_500 as u64).saturating_mul(n as u64)) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(315_839 as u64).saturating_mul(m as u64)) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(217_497 as u64).saturating_mul(a as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(m as u64))) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(a as u64))) + .saturating_add(RocksDbWeight::get().writes(2004 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(n as u64))) } // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts CollectionMaxSupply (r:1 w:0) // Storage: Nfts Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(43_007_000 as u64) + Weight::from_ref_time(43_014_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -356,7 +363,7 @@ impl WeightInfo for () { // Storage: Nfts Account (r:0 w:1) // Storage: Nfts ItemPriceOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(43_922_000 as u64) + Weight::from_ref_time(44_421_000 as u64) 
.saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -365,7 +372,7 @@ impl WeightInfo for () { // Storage: Nfts Account (r:0 w:2) // Storage: Nfts ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(33_951_000 as u64) + Weight::from_ref_time(34_315_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -373,9 +380,9 @@ impl WeightInfo for () { // Storage: Nfts Asset (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(11_194_000 as u64).saturating_mul(i as u64)) + Weight::from_ref_time(22_836_000 as u64) + // Standard Error: 9_131 + .saturating_add(Weight::from_ref_time(10_894_264 as u64).saturating_mul(i as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) @@ -384,26 +391,26 @@ impl WeightInfo for () { // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn freeze() -> Weight { - Weight::from_ref_time(26_745_000 as u64) + Weight::from_ref_time(27_329_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn thaw() -> Weight { - Weight::from_ref_time(27_466_000 as u64) + Weight::from_ref_time(27_842_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) fn freeze_collection() -> Weight { - Weight::from_ref_time(22_591_000 as u64) + Weight::from_ref_time(23_129_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // 
Storage: Nfts Class (r:1 w:1) fn thaw_collection() -> Weight { - Weight::from_ref_time(22_392_000 as u64) + Weight::from_ref_time(22_584_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -411,20 +418,20 @@ impl WeightInfo for () { // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(31_202_000 as u64) + Weight::from_ref_time(31_684_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Nfts Class (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(23_063_000 as u64) + Weight::from_ref_time(23_143_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - Weight::from_ref_time(25_598_000 as u64) + Weight::from_ref_time(25_684_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -432,7 +439,7 @@ impl WeightInfo for () { // Storage: Nfts InstanceMetadataOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(48_684_000 as u64) + Weight::from_ref_time(50_159_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -440,76 +447,76 @@ impl WeightInfo for () { // Storage: Nfts InstanceMetadataOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn clear_attribute() -> Weight { - Weight::from_ref_time(47_267_000 as u64) + Weight::from_ref_time(47_824_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { 
- Weight::from_ref_time(40_174_000 as u64) + Weight::from_ref_time(39_968_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(41_611_000 as u64) + Weight::from_ref_time(42_182_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(40_073_000 as u64) + Weight::from_ref_time(39_330_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(38_191_000 as u64) + Weight::from_ref_time(38_351_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(29_461_000 as u64) + Weight::from_ref_time(29_530_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(29_690_000 as u64) + Weight::from_ref_time(29_417_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn clear_all_transfer_approvals() -> Weight { - Weight::from_ref_time(27_758_000 as u64) + Weight::from_ref_time(28_482_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) 
.saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(26_425_000 as u64) + Weight::from_ref_time(25_851_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts CollectionMaxSupply (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(24_533_000 as u64) + Weight::from_ref_time(24_836_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Asset (r:1 w:0) // Storage: Nfts ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(24_745_000 as u64) + Weight::from_ref_time(25_665_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -518,8 +525,14 @@ impl WeightInfo for () { // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Account (r:0 w:2) fn buy_item() -> Weight { - Weight::from_ref_time(47_967_000 as u64) + Weight::from_ref_time(47_502_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } + /// The range of component `n` is `[0, 10]`. 
+ fn pay_tips(n: u32, ) -> Weight { + Weight::from_ref_time(5_417_000 as u64) + // Standard Error: 32_526 + .saturating_add(Weight::from_ref_time(4_304_363 as u64).saturating_mul(n as u64)) + } } From 490a0fad02d1d0d3cd494eb48d1bca5f67e094ba Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Wed, 5 Oct 2022 13:58:04 +0300 Subject: [PATCH 013/101] [Uniques V2] Atomic NFTs swap (#12285) * Atomic NFTs swap * Fmt * Fix benchmark * Rename swap -> atomic_swap * Update target balance * Rollback * Fix * ".git/.scripts/bench-bot.sh" pallet dev pallet_nfts * Make desired item optional * Apply suggestions * Update frame/nfts/src/features/atomic_swap.rs Co-authored-by: Squirrel * Rename fields * Optimisation * Add a comment * deadline -> maybe_deadline * Add docs * Change comments * Add price direction field * ".git/.scripts/bench-bot.sh" pallet dev pallet_nfts * Wrap price and direction * Fix benchmarks * Use ensure! instead of if {} * Make duration param mandatory and limit it to MaxDeadlineDuration * Make the code safer * Fix clippy * Chore * Remove unused vars * try * try 2 * try 3 Co-authored-by: command-bot <> Co-authored-by: Squirrel --- bin/node/runtime/src/lib.rs | 2 + frame/nfts/src/benchmarking.rs | 84 ++++++- frame/nfts/src/features/atomic_swap.rs | 175 ++++++++++++++ frame/nfts/src/features/mod.rs | 1 + frame/nfts/src/functions.rs | 4 + frame/nfts/src/lib.rs | 145 ++++++++++++ frame/nfts/src/mock.rs | 1 + frame/nfts/src/tests.rs | 315 ++++++++++++++++++++++++- frame/nfts/src/types.rs | 26 ++ frame/nfts/src/weights.rs | 223 ++++++++++------- 10 files changed, 891 insertions(+), 85 deletions(-) create mode 100644 frame/nfts/src/features/atomic_swap.rs diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index eb6941e85215c..875346472f21a 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1473,6 +1473,7 @@ parameter_types! 
{ pub const ValueLimit: u32 = 256; pub const ApprovalsLimit: u32 = 20; pub const MaxTips: u32 = 10; + pub const MaxDeadlineDuration: BlockNumber = 12 * 30 * DAYS; } impl pallet_uniques::Config for Runtime { @@ -1512,6 +1513,7 @@ impl pallet_nfts::Config for Runtime { type ValueLimit = ValueLimit; type ApprovalsLimit = ApprovalsLimit; type MaxTips = MaxTips; + type MaxDeadlineDuration = MaxDeadlineDuration; type WeightInfo = pallet_nfts::weights::SubstrateWeight; #[cfg(feature = "runtime-benchmarks")] type Helper = (); diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index 00527abc99e02..b35122cf919c7 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -29,7 +29,7 @@ use frame_support::{ BoundedVec, }; use frame_system::RawOrigin as SystemOrigin; -use sp_runtime::traits::Bounded; +use sp_runtime::traits::{Bounded, One}; use sp_std::prelude::*; use crate::Pallet as Nfts; @@ -476,5 +476,87 @@ benchmarks_instance_pallet! { } } + create_swap { + let (collection, caller, _) = create_collection::(); + let (item1, ..) = mint_item::(0); + let (item2, ..) = mint_item::(1); + let price = ItemPrice::::from(100u32); + let price_direction = PriceDirection::Receive; + let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; + let duration = T::MaxDeadlineDuration::get(); + frame_system::Pallet::::set_block_number(One::one()); + }: _(SystemOrigin::Signed(caller.clone()), collection, item1, collection, Some(item2), Some(price_with_direction.clone()), duration) + verify { + let current_block = frame_system::Pallet::::block_number(); + assert_last_event::(Event::SwapCreated { + offered_collection: collection, + offered_item: item1, + desired_collection: collection, + desired_item: Some(item2), + price: Some(price_with_direction), + deadline: current_block.saturating_add(duration), + }.into()); + } + + cancel_swap { + let (collection, caller, _) = create_collection::(); + let (item1, ..) 
= mint_item::(0); + let (item2, ..) = mint_item::(1); + let price = ItemPrice::::from(100u32); + let origin = SystemOrigin::Signed(caller.clone()).into(); + let duration = T::MaxDeadlineDuration::get(); + let price_direction = PriceDirection::Receive; + let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; + frame_system::Pallet::::set_block_number(One::one()); + Nfts::::create_swap(origin, collection, item1, collection, Some(item2), Some(price_with_direction.clone()), duration)?; + }: _(SystemOrigin::Signed(caller.clone()), collection, item1) + verify { + assert_last_event::(Event::SwapCancelled { + offered_collection: collection, + offered_item: item1, + desired_collection: collection, + desired_item: Some(item2), + price: Some(price_with_direction), + deadline: duration.saturating_add(One::one()), + }.into()); + } + + claim_swap { + let (collection, caller, _) = create_collection::(); + let (item1, ..) = mint_item::(0); + let (item2, ..) = mint_item::(1); + let price = ItemPrice::::from(0u32); + let price_direction = PriceDirection::Receive; + let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; + let duration = T::MaxDeadlineDuration::get(); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + let origin = SystemOrigin::Signed(caller.clone()); + frame_system::Pallet::::set_block_number(One::one()); + Nfts::::transfer(origin.clone().into(), collection, item2, target_lookup)?; + Nfts::::create_swap( + origin.clone().into(), + collection, + item1, + collection, + Some(item2), + Some(price_with_direction.clone()), + duration, + )?; + }: _(SystemOrigin::Signed(target.clone()), collection, item2, collection, item1, Some(price_with_direction.clone())) + verify { + let current_block = frame_system::Pallet::::block_number(); + assert_last_event::(Event::SwapClaimed { + sent_collection: collection, + sent_item: item2, + 
sent_item_owner: target, + received_collection: collection, + received_item: item1, + received_item_owner: caller, + price: Some(price_with_direction), + deadline: duration.saturating_add(One::one()), + }.into()); + } + impl_benchmark_test_suite!(Nfts, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/frame/nfts/src/features/atomic_swap.rs b/frame/nfts/src/features/atomic_swap.rs new file mode 100644 index 0000000000000..116da57477f4e --- /dev/null +++ b/frame/nfts/src/features/atomic_swap.rs @@ -0,0 +1,175 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::{ + pallet_prelude::*, + traits::{Currency, ExistenceRequirement::KeepAlive}, +}; + +impl, I: 'static> Pallet { + pub fn do_create_swap( + caller: T::AccountId, + offered_collection_id: T::CollectionId, + offered_item_id: T::ItemId, + desired_collection_id: T::CollectionId, + maybe_desired_item_id: Option, + maybe_price: Option>>, + duration: ::BlockNumber, + ) -> DispatchResult { + ensure!(duration <= T::MaxDeadlineDuration::get(), Error::::WrongDuration); + + let item = Item::::get(&offered_collection_id, &offered_item_id) + .ok_or(Error::::UnknownItem)?; + ensure!(item.owner == caller, Error::::NoPermission); + + match maybe_desired_item_id { + Some(desired_item_id) => ensure!( + Item::::contains_key(&desired_collection_id, &desired_item_id), + Error::::UnknownItem + ), + None => ensure!( + Collection::::contains_key(&desired_collection_id), + Error::::UnknownCollection + ), + }; + + let now = frame_system::Pallet::::block_number(); + let deadline = duration.saturating_add(now); + + PendingSwapOf::::insert( + &offered_collection_id, + &offered_item_id, + PendingSwap { + desired_collection: desired_collection_id, + desired_item: maybe_desired_item_id, + price: maybe_price.clone(), + deadline, + }, + ); + + Self::deposit_event(Event::SwapCreated { + offered_collection: offered_collection_id, + offered_item: offered_item_id, + desired_collection: desired_collection_id, + desired_item: maybe_desired_item_id, + price: maybe_price, + deadline, + }); + + Ok(()) + } + + pub fn do_cancel_swap( + caller: T::AccountId, + offered_collection_id: T::CollectionId, + offered_item_id: T::ItemId, + ) -> DispatchResult { + let swap = PendingSwapOf::::get(&offered_collection_id, &offered_item_id) + .ok_or(Error::::UnknownSwap)?; + + let now = frame_system::Pallet::::block_number(); + if swap.deadline > now { + let item = Item::::get(&offered_collection_id, &offered_item_id) + .ok_or(Error::::UnknownItem)?; + ensure!(item.owner == caller, 
Error::::NoPermission); + } + + PendingSwapOf::::remove(&offered_collection_id, &offered_item_id); + + Self::deposit_event(Event::SwapCancelled { + offered_collection: offered_collection_id, + offered_item: offered_item_id, + desired_collection: swap.desired_collection, + desired_item: swap.desired_item, + price: swap.price, + deadline: swap.deadline, + }); + + Ok(()) + } + + pub fn do_claim_swap( + caller: T::AccountId, + send_collection_id: T::CollectionId, + send_item_id: T::ItemId, + receive_collection_id: T::CollectionId, + receive_item_id: T::ItemId, + witness_price: Option>>, + ) -> DispatchResult { + let send_item = Item::::get(&send_collection_id, &send_item_id) + .ok_or(Error::::UnknownItem)?; + let receive_item = Item::::get(&receive_collection_id, &receive_item_id) + .ok_or(Error::::UnknownItem)?; + let swap = PendingSwapOf::::get(&receive_collection_id, &receive_item_id) + .ok_or(Error::::UnknownSwap)?; + + ensure!(send_item.owner == caller, Error::::NoPermission); + ensure!( + swap.desired_collection == send_collection_id && swap.price == witness_price, + Error::::UnknownSwap + ); + + if let Some(desired_item) = swap.desired_item { + ensure!(desired_item == send_item_id, Error::::UnknownSwap); + } + + let now = frame_system::Pallet::::block_number(); + ensure!(now <= swap.deadline, Error::::DeadlineExpired); + + if let Some(ref price) = swap.price { + match price.direction { + PriceDirection::Send => T::Currency::transfer( + &receive_item.owner, + &send_item.owner, + price.amount, + KeepAlive, + )?, + PriceDirection::Receive => T::Currency::transfer( + &send_item.owner, + &receive_item.owner, + price.amount, + KeepAlive, + )?, + }; + } + + // This also removes the swap. 
+ Self::do_transfer(send_collection_id, send_item_id, receive_item.owner.clone(), |_, _| { + Ok(()) + })?; + Self::do_transfer( + receive_collection_id, + receive_item_id, + send_item.owner.clone(), + |_, _| Ok(()), + )?; + + Self::deposit_event(Event::SwapClaimed { + sent_collection: send_collection_id, + sent_item: send_item_id, + sent_item_owner: send_item.owner, + received_collection: receive_collection_id, + received_item: receive_item_id, + received_item_owner: receive_item.owner, + price: swap.price, + deadline: swap.deadline, + }); + + Ok(()) + } +} diff --git a/frame/nfts/src/features/mod.rs b/frame/nfts/src/features/mod.rs index 5661797978439..24b58dee5f3a0 100644 --- a/frame/nfts/src/features/mod.rs +++ b/frame/nfts/src/features/mod.rs @@ -15,4 +15,5 @@ // See the License for the specific language governing permissions and // limitations under the License. +pub mod atomic_swap; pub mod buy_sell; diff --git a/frame/nfts/src/functions.rs b/frame/nfts/src/functions.rs index f935a5b2eba90..17be672834bf2 100644 --- a/frame/nfts/src/functions.rs +++ b/frame/nfts/src/functions.rs @@ -56,6 +56,7 @@ impl, I: 'static> Pallet { Item::::insert(&collection, &item, &details); ItemPriceOf::::remove(&collection, &item); + PendingSwapOf::::remove(&collection, &item); Self::deposit_event(Event::Transferred { collection, @@ -129,6 +130,8 @@ impl, I: 'static> Pallet { ItemMetadataOf::::remove_prefix(&collection, None); #[allow(deprecated)] ItemPriceOf::::remove_prefix(&collection, None); + #[allow(deprecated)] + PendingSwapOf::::remove_prefix(&collection, None); CollectionMetadataOf::::remove(&collection); #[allow(deprecated)] Attribute::::remove_prefix((&collection,), None); @@ -219,6 +222,7 @@ impl, I: 'static> Pallet { Item::::remove(&collection, &item); Account::::remove((&owner, &collection, &item)); ItemPriceOf::::remove(&collection, &item); + PendingSwapOf::::remove(&collection, &item); Self::deposit_event(Event::Burned { collection, item, owner }); Ok(()) diff --git 
a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index a4555cd655a08..7e9b2a42f7e14 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -182,6 +182,10 @@ pub mod pallet { #[pallet::constant] type MaxTips: Get; + /// The max duration in blocks for deadlines. + #[pallet::constant] + type MaxDeadlineDuration: Get<::BlockNumber>; + #[cfg(feature = "runtime-benchmarks")] /// A set of helper functions for benchmarking. type Helper: BenchmarkHelper; @@ -312,6 +316,23 @@ pub mod pallet { pub(super) type NextCollectionId, I: 'static = ()> = StorageValue<_, T::CollectionId, OptionQuery>; + #[pallet::storage] + /// Handles all the pending swaps. + pub(super) type PendingSwapOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + PendingSwap< + T::CollectionId, + T::ItemId, + PriceWithDirection>, + ::BlockNumber, + >, + OptionQuery, + >; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { @@ -433,6 +454,35 @@ pub mod pallet { receiver: T::AccountId, amount: DepositBalanceOf, }, + /// An `item` swap intent was created. + SwapCreated { + offered_collection: T::CollectionId, + offered_item: T::ItemId, + desired_collection: T::CollectionId, + desired_item: Option, + price: Option>>, + deadline: ::BlockNumber, + }, + /// The swap was cancelled. + SwapCancelled { + offered_collection: T::CollectionId, + offered_item: T::ItemId, + desired_collection: T::CollectionId, + desired_item: Option, + price: Option>>, + deadline: ::BlockNumber, + }, + /// The swap has been claimed. + SwapClaimed { + sent_collection: T::CollectionId, + sent_item: T::ItemId, + sent_item_owner: T::AccountId, + received_collection: T::CollectionId, + received_item: T::ItemId, + received_item_owner: T::AccountId, + price: Option>>, + deadline: ::BlockNumber, + }, } #[pallet::error] @@ -471,12 +521,18 @@ pub mod pallet { MaxSupplyTooSmall, /// The given item ID is unknown. 
UnknownItem, + /// Swap doesn't exist. + UnknownSwap, /// Item is not for sale. NotForSale, /// The provided bid is too low. BidTooLow, /// The item has reached its approval limit. ReachedApprovalLimit, + /// The deadline has already expired. + DeadlineExpired, + /// The duration provided should be less than or equal to MaxDeadlineDuration. + WrongDuration, } impl, I: 'static> Pallet { @@ -1638,5 +1694,94 @@ pub mod pallet { let origin = ensure_signed(origin)?; Self::do_pay_tips(origin, tips) } + + /// Register a new atomic swap, declaring an intention to send an `item` in exchange for + /// `desired_item` from origin to target on the current blockchain. + /// The target can execute the swap during the specified `duration` of blocks. + /// Additionally, a price can be set for the desired `item`. + /// + /// Origin must be Signed and must be an owner of the `item`. + /// + /// - `offered_collection`: The collection of the item. + /// - `offered_item`: The item an owner wants to give. + /// - `desired_collection`: The collection of the desired item. + /// - `maybe_desired_item`: The desired item an owner wants to receive. + /// - `maybe_price`: The price an owner is willing to pay or receive for the desired `item`. + /// - `duration`: A deadline for the swap. Specified by providing the + /// number of blocks after which the swap will expire. + /// + /// Emits `SwapCreated` on success. + #[pallet::weight(T::WeightInfo::create_swap())] + pub fn create_swap( + origin: OriginFor, + offered_collection: T::CollectionId, + offered_item: T::ItemId, + desired_collection: T::CollectionId, + maybe_desired_item: Option, + maybe_price: Option>>, + duration: ::BlockNumber, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_create_swap( + origin, + offered_collection, + offered_item, + desired_collection, + maybe_desired_item, + maybe_price, + duration, + ) + } + + /// Cancel an atomic swap. + /// + /// Origin must be Signed. 
+ /// Origin must be an owner of the `item` if the deadline hasn't expired. + /// + /// - `offered_collection`: The collection of the item. + /// - `offered_item`: The item an owner wants to give. + /// + /// Emits `SwapCancelled` on success. + #[pallet::weight(T::WeightInfo::cancel_swap())] + pub fn cancel_swap( + origin: OriginFor, + offered_collection: T::CollectionId, + offered_item: T::ItemId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_cancel_swap(origin, offered_collection, offered_item) + } + + /// Claim an atomic swap. + /// This method executes a pending swap that was created by a counterpart before. + /// + /// Origin must be Signed and must be an owner of the `item`. + /// + /// - `send_collection`: The collection of the item to be sent. + /// - `send_item`: The item to be sent. + /// - `receive_collection`: The collection of the item to be received. + /// - `receive_item`: The item to be received. + /// - `witness_price`: A price that was previously agreed on. + /// + /// Emits `SwapClaimed` on success. 
+ #[pallet::weight(T::WeightInfo::claim_swap())] + pub fn claim_swap( + origin: OriginFor, + send_collection: T::CollectionId, + send_item: T::ItemId, + receive_collection: T::CollectionId, + receive_item: T::ItemId, + witness_price: Option>>, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_claim_swap( + origin, + send_collection, + send_item, + receive_collection, + receive_item, + witness_price, + ) + } } } diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs index c13298aa8162d..23493829eaca7 100644 --- a/frame/nfts/src/mock.rs +++ b/frame/nfts/src/mock.rs @@ -102,6 +102,7 @@ impl Config for Test { type ValueLimit = ConstU32<50>; type ApprovalsLimit = ConstU32<10>; type MaxTips = ConstU32<10>; + type MaxDeadlineDuration = ConstU64<10000>; type WeightInfo = (); #[cfg(feature = "runtime-benchmarks")] type Helper = (); diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 7004fb1f4e93b..0d2d0c661b273 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -18,7 +18,11 @@ //! Tests for Nfts pallet. 
use crate::{mock::*, Event, *}; -use frame_support::{assert_noop, assert_ok, dispatch::Dispatchable, traits::Currency}; +use frame_support::{ + assert_noop, assert_ok, + dispatch::Dispatchable, + traits::{Currency, Get}, +}; use pallet_balances::Error as BalancesError; use sp_std::prelude::*; @@ -1105,3 +1109,312 @@ fn pay_tips_should_work() { })); }); } + +#[test] +fn create_cancel_swap_should_work() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + let user_id = 1; + let collection_id = 0; + let item_1 = 1; + let item_2 = 2; + let price = 1; + let price_direction = PriceDirection::Receive; + let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; + let duration = 2; + let expect_deadline = 3; + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, true)); + + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_1, user_id)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_2, user_id)); + + // validate desired item and the collection exists + assert_noop!( + Nfts::create_swap( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + collection_id, + Some(item_2 + 1), + Some(price_with_direction.clone()), + duration, + ), + Error::::UnknownItem + ); + assert_noop!( + Nfts::create_swap( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + collection_id + 1, + None, + Some(price_with_direction.clone()), + duration, + ), + Error::::UnknownCollection + ); + + let max_duration: u64 = ::MaxDeadlineDuration::get(); + assert_noop!( + Nfts::create_swap( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + collection_id, + Some(item_2), + Some(price_with_direction.clone()), + max_duration.saturating_add(1), + ), + Error::::WrongDuration + ); + + assert_ok!(Nfts::create_swap( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + collection_id, + Some(item_2), + Some(price_with_direction.clone()), + duration, + )); + + let swap 
= PendingSwapOf::::get(collection_id, item_1).unwrap(); + assert_eq!(swap.desired_collection, collection_id); + assert_eq!(swap.desired_item, Some(item_2)); + assert_eq!(swap.price, Some(price_with_direction.clone())); + assert_eq!(swap.deadline, expect_deadline); + + assert!(events().contains(&Event::::SwapCreated { + offered_collection: collection_id, + offered_item: item_1, + desired_collection: collection_id, + desired_item: Some(item_2), + price: Some(price_with_direction.clone()), + deadline: expect_deadline, + })); + + // validate we can cancel the swap + assert_ok!(Nfts::cancel_swap(RuntimeOrigin::signed(user_id), collection_id, item_1)); + assert!(events().contains(&Event::::SwapCancelled { + offered_collection: collection_id, + offered_item: item_1, + desired_collection: collection_id, + desired_item: Some(item_2), + price: Some(price_with_direction.clone()), + deadline: expect_deadline, + })); + assert!(!PendingSwapOf::::contains_key(collection_id, item_1)); + + // validate anyone can cancel the expired swap + assert_ok!(Nfts::create_swap( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + collection_id, + Some(item_2), + Some(price_with_direction.clone()), + duration, + )); + assert_noop!( + Nfts::cancel_swap(RuntimeOrigin::signed(user_id + 1), collection_id, item_1), + Error::::NoPermission + ); + System::set_block_number(expect_deadline + 1); + assert_ok!(Nfts::cancel_swap(RuntimeOrigin::signed(user_id + 1), collection_id, item_1)); + + // validate optional desired_item param + assert_ok!(Nfts::create_swap( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + collection_id, + None, + Some(price_with_direction), + duration, + )); + + let swap = PendingSwapOf::::get(collection_id, item_1).unwrap(); + assert_eq!(swap.desired_item, None); + }); +} + +#[test] +fn claim_swap_should_work() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + let user_1 = 1; + let user_2 = 2; + let collection_id = 0; + let item_1 = 1; + 
let item_2 = 2; + let item_3 = 3; + let item_4 = 4; + let item_5 = 5; + let price = 100; + let price_direction = PriceDirection::Receive; + let price_with_direction = + PriceWithDirection { amount: price, direction: price_direction.clone() }; + let duration = 2; + let initial_balance = 1000; + let deadline = 1 + duration; + + Balances::make_free_balance_be(&user_1, initial_balance); + Balances::make_free_balance_be(&user_2, initial_balance); + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, true)); + + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_1, user_1)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_2, user_2)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_3, user_2)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_4, user_1)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_5, user_2)); + + assert_ok!(Nfts::create_swap( + RuntimeOrigin::signed(user_1), + collection_id, + item_1, + collection_id, + Some(item_2), + Some(price_with_direction.clone()), + duration, + )); + + // validate the deadline + System::set_block_number(5); + assert_noop!( + Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_2, + collection_id, + item_1, + Some(price_with_direction.clone()), + ), + Error::::DeadlineExpired + ); + System::set_block_number(1); + + // validate edge cases + assert_noop!( + Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_2, + collection_id, + item_4, // no swap was created for that asset + Some(price_with_direction.clone()), + ), + Error::::UnknownSwap + ); + assert_noop!( + Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_4, // not my item + collection_id, + item_1, + Some(price_with_direction.clone()), + ), + Error::::NoPermission + ); + assert_noop!( + Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_5, 
// my item, but not the one another part wants + collection_id, + item_1, + Some(price_with_direction.clone()), + ), + Error::::UnknownSwap + ); + assert_noop!( + Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_2, + collection_id, + item_1, + Some(PriceWithDirection { amount: price + 1, direction: price_direction.clone() }), // wrong price + ), + Error::::UnknownSwap + ); + assert_noop!( + Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_2, + collection_id, + item_1, + Some(PriceWithDirection { amount: price, direction: PriceDirection::Send }), // wrong direction + ), + Error::::UnknownSwap + ); + + assert_ok!(Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_2, + collection_id, + item_1, + Some(price_with_direction.clone()), + )); + + // validate the new owner + let item = Item::::get(collection_id, item_1).unwrap(); + assert_eq!(item.owner, user_2); + let item = Item::::get(collection_id, item_2).unwrap(); + assert_eq!(item.owner, user_1); + + // validate the balances + assert_eq!(Balances::total_balance(&user_1), initial_balance + price); + assert_eq!(Balances::total_balance(&user_2), initial_balance - price); + + // ensure we reset the swap + assert!(!PendingSwapOf::::contains_key(collection_id, item_1)); + + // validate the event + assert!(events().contains(&Event::::SwapClaimed { + sent_collection: collection_id, + sent_item: item_2, + sent_item_owner: user_2, + received_collection: collection_id, + received_item: item_1, + received_item_owner: user_1, + price: Some(price_with_direction.clone()), + deadline, + })); + + // validate the optional desired_item param and another price direction + let price_direction = PriceDirection::Send; + let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; + Balances::make_free_balance_be(&user_1, initial_balance); + Balances::make_free_balance_be(&user_2, initial_balance); + + assert_ok!(Nfts::create_swap( + 
RuntimeOrigin::signed(user_1), + collection_id, + item_4, + collection_id, + None, + Some(price_with_direction.clone()), + duration, + )); + assert_ok!(Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_1, + collection_id, + item_4, + Some(price_with_direction), + )); + let item = Item::::get(collection_id, item_1).unwrap(); + assert_eq!(item.owner, user_1); + let item = Item::::get(collection_id, item_4).unwrap(); + assert_eq!(item.owner, user_2); + + assert_eq!(Balances::total_balance(&user_1), initial_balance - price); + assert_eq!(Balances::total_balance(&user_2), initial_balance + price); + }); +} diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index e91c513e4c4c2..399de3c5dad1e 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -146,3 +146,29 @@ pub struct ItemTip { /// An amount the sender is willing to tip. pub(super) amount: Amount, } + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] +pub struct PendingSwap { + /// A collection of the item user wants to receive. + pub(super) desired_collection: CollectionId, + /// An item user wants to receive. + pub(super) desired_item: Option, + /// A price for the desired `item` with the direction. + pub(super) price: Option, + /// An optional deadline for the swap. + pub(super) deadline: Deadline, +} + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub enum PriceDirection { + Send, + Receive, +} + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct PriceWithDirection { + /// An amount. + pub(super) amount: Amount, + /// A direction (send or receive). + pub(super) direction: PriceDirection, +} diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index 1dab0838f32b2..9d62db0f8d85d 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_nfts //! 
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-09-30, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -74,6 +74,9 @@ pub trait WeightInfo { fn set_price() -> Weight; fn buy_item() -> Weight; fn pay_tips(n: u32, ) -> Weight; + fn create_swap() -> Weight; + fn cancel_swap() -> Weight; + fn claim_swap() -> Weight; } /// Weights for pallet_nfts using the Substrate node and recommended hardware. @@ -83,7 +86,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(37_627_000 as u64) + Weight::from_ref_time(38_062_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -91,7 +94,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(25_748_000 as u64) + Weight::from_ref_time(25_917_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -107,13 +110,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. 
fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - Weight::from_ref_time(2_449_817_000 as u64) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(8_423_500 as u64).saturating_mul(n as u64)) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(315_839 as u64).saturating_mul(m as u64)) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(217_497 as u64).saturating_mul(a as u64)) + Weight::from_ref_time(2_432_555_000 as u64) + // Standard Error: 28_964 + .saturating_add(Weight::from_ref_time(8_474_465 as u64).saturating_mul(n as u64)) + // Standard Error: 28_964 + .saturating_add(Weight::from_ref_time(333_758 as u64).saturating_mul(m as u64)) + // Standard Error: 28_964 + .saturating_add(Weight::from_ref_time(222_052 as u64).saturating_mul(a as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(2004 as u64)) @@ -124,7 +127,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts CollectionMaxSupply (r:1 w:0) // Storage: Nfts Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(43_014_000 as u64) + Weight::from_ref_time(43_755_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -132,27 +135,29 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Account (r:0 w:1) // Storage: Nfts ItemPriceOf (r:0 w:1) + // Storage: Nfts PendingSwapOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(44_421_000 as u64) + Weight::from_ref_time(46_768_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Account (r:0 w:2) // Storage: Nfts ItemPriceOf (r:0 w:1) + // Storage: Nfts 
PendingSwapOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(34_315_000 as u64) + Weight::from_ref_time(36_282_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts Asset (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - Weight::from_ref_time(22_836_000 as u64) - // Standard Error: 9_131 - .saturating_add(Weight::from_ref_time(10_894_264 as u64).saturating_mul(i as u64)) + Weight::from_ref_time(23_359_000 as u64) + // Standard Error: 9_645 + .saturating_add(Weight::from_ref_time(10_822_144 as u64).saturating_mul(i as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) @@ -161,26 +166,26 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn freeze() -> Weight { - Weight::from_ref_time(27_329_000 as u64) + Weight::from_ref_time(27_805_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn thaw() -> Weight { - Weight::from_ref_time(27_842_000 as u64) + Weight::from_ref_time(27_712_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) fn freeze_collection() -> Weight { - Weight::from_ref_time(23_129_000 as u64) + Weight::from_ref_time(23_068_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) fn thaw_collection() -> Weight { - Weight::from_ref_time(22_584_000 as u64) + Weight::from_ref_time(23_200_000 as u64) 
.saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -188,20 +193,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(31_684_000 as u64) + Weight::from_ref_time(31_800_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Nfts Class (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(23_143_000 as u64) + Weight::from_ref_time(23_959_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - Weight::from_ref_time(25_684_000 as u64) + Weight::from_ref_time(26_334_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -209,7 +214,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts InstanceMetadataOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(50_159_000 as u64) + Weight::from_ref_time(50_978_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -217,76 +222,76 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts InstanceMetadataOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn clear_attribute() -> Weight { - Weight::from_ref_time(47_824_000 as u64) + Weight::from_ref_time(49_555_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - Weight::from_ref_time(39_968_000 as u64) + Weight::from_ref_time(41_099_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as 
u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(42_182_000 as u64) + Weight::from_ref_time(42_893_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(39_330_000 as u64) + Weight::from_ref_time(39_785_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(38_351_000 as u64) + Weight::from_ref_time(39_764_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(29_530_000 as u64) + Weight::from_ref_time(29_577_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(29_417_000 as u64) + Weight::from_ref_time(29_696_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn clear_all_transfer_approvals() -> Weight { - Weight::from_ref_time(28_482_000 as u64) + Weight::from_ref_time(28_692_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(25_851_000 as 
u64) + Weight::from_ref_time(26_345_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts CollectionMaxSupply (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(24_836_000 as u64) + Weight::from_ref_time(24_826_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Asset (r:1 w:0) // Storage: Nfts ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(25_665_000 as u64) + Weight::from_ref_time(26_376_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -294,16 +299,41 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts ItemPriceOf (r:1 w:1) // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Account (r:0 w:2) + // Storage: Nfts PendingSwapOf (r:0 w:1) fn buy_item() -> Weight { - Weight::from_ref_time(47_502_000 as u64) + Weight::from_ref_time(49_140_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } /// The range of component `n` is `[0, 10]`. 
fn pay_tips(n: u32, ) -> Weight { - Weight::from_ref_time(5_417_000 as u64) - // Standard Error: 32_526 - .saturating_add(Weight::from_ref_time(4_304_363 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(5_477_000 as u64) + // Standard Error: 33_188 + .saturating_add(Weight::from_ref_time(4_285_339 as u64).saturating_mul(n as u64)) + } + // Storage: Nfts Asset (r:2 w:0) + // Storage: Nfts PendingSwapOf (r:0 w:1) + fn create_swap() -> Weight { + Weight::from_ref_time(30_330_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + } + // Storage: Nfts PendingSwapOf (r:1 w:1) + // Storage: Nfts Asset (r:1 w:0) + fn cancel_swap() -> Weight { + Weight::from_ref_time(30_516_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + } + // Storage: Nfts Asset (r:2 w:2) + // Storage: Nfts PendingSwapOf (r:1 w:2) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Account (r:0 w:4) + // Storage: Nfts ItemPriceOf (r:0 w:2) + fn claim_swap() -> Weight { + Weight::from_ref_time(66_191_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(10 as u64)) } } @@ -313,7 +343,7 @@ impl WeightInfo for () { // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(37_627_000 as u64) + Weight::from_ref_time(38_062_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -321,7 +351,7 @@ impl WeightInfo for () { // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(25_748_000 as u64) + Weight::from_ref_time(25_917_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -337,13 +367,13 @@ impl WeightInfo for () { /// The range 
of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - Weight::from_ref_time(2_449_817_000 as u64) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(8_423_500 as u64).saturating_mul(n as u64)) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(315_839 as u64).saturating_mul(m as u64)) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(217_497 as u64).saturating_mul(a as u64)) + Weight::from_ref_time(2_432_555_000 as u64) + // Standard Error: 28_964 + .saturating_add(Weight::from_ref_time(8_474_465 as u64).saturating_mul(n as u64)) + // Standard Error: 28_964 + .saturating_add(Weight::from_ref_time(333_758 as u64).saturating_mul(m as u64)) + // Standard Error: 28_964 + .saturating_add(Weight::from_ref_time(222_052 as u64).saturating_mul(a as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(2004 as u64)) @@ -354,7 +384,7 @@ impl WeightInfo for () { // Storage: Nfts CollectionMaxSupply (r:1 w:0) // Storage: Nfts Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(43_014_000 as u64) + Weight::from_ref_time(43_755_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -362,27 +392,29 @@ impl WeightInfo for () { // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Account (r:0 w:1) // Storage: Nfts ItemPriceOf (r:0 w:1) + // Storage: Nfts PendingSwapOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(44_421_000 as u64) + Weight::from_ref_time(46_768_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Account (r:0 w:2) 
// Storage: Nfts ItemPriceOf (r:0 w:1) + // Storage: Nfts PendingSwapOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(34_315_000 as u64) + Weight::from_ref_time(36_282_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts Asset (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - Weight::from_ref_time(22_836_000 as u64) - // Standard Error: 9_131 - .saturating_add(Weight::from_ref_time(10_894_264 as u64).saturating_mul(i as u64)) + Weight::from_ref_time(23_359_000 as u64) + // Standard Error: 9_645 + .saturating_add(Weight::from_ref_time(10_822_144 as u64).saturating_mul(i as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) @@ -391,26 +423,26 @@ impl WeightInfo for () { // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn freeze() -> Weight { - Weight::from_ref_time(27_329_000 as u64) + Weight::from_ref_time(27_805_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn thaw() -> Weight { - Weight::from_ref_time(27_842_000 as u64) + Weight::from_ref_time(27_712_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) fn freeze_collection() -> Weight { - Weight::from_ref_time(23_129_000 as u64) + Weight::from_ref_time(23_068_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) fn thaw_collection() -> Weight { - 
Weight::from_ref_time(22_584_000 as u64) + Weight::from_ref_time(23_200_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -418,20 +450,20 @@ impl WeightInfo for () { // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(31_684_000 as u64) + Weight::from_ref_time(31_800_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Nfts Class (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(23_143_000 as u64) + Weight::from_ref_time(23_959_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - Weight::from_ref_time(25_684_000 as u64) + Weight::from_ref_time(26_334_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -439,7 +471,7 @@ impl WeightInfo for () { // Storage: Nfts InstanceMetadataOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(50_159_000 as u64) + Weight::from_ref_time(50_978_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -447,76 +479,76 @@ impl WeightInfo for () { // Storage: Nfts InstanceMetadataOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn clear_attribute() -> Weight { - Weight::from_ref_time(47_824_000 as u64) + Weight::from_ref_time(49_555_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - Weight::from_ref_time(39_968_000 as u64) + 
Weight::from_ref_time(41_099_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(42_182_000 as u64) + Weight::from_ref_time(42_893_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(39_330_000 as u64) + Weight::from_ref_time(39_785_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(38_351_000 as u64) + Weight::from_ref_time(39_764_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(29_530_000 as u64) + Weight::from_ref_time(29_577_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(29_417_000 as u64) + Weight::from_ref_time(29_696_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn clear_all_transfer_approvals() -> Weight { - Weight::from_ref_time(28_482_000 as u64) + Weight::from_ref_time(28_692_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // 
Storage: Nfts OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(25_851_000 as u64) + Weight::from_ref_time(26_345_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts CollectionMaxSupply (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(24_836_000 as u64) + Weight::from_ref_time(24_826_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Asset (r:1 w:0) // Storage: Nfts ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(25_665_000 as u64) + Weight::from_ref_time(26_376_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -524,15 +556,40 @@ impl WeightInfo for () { // Storage: Nfts ItemPriceOf (r:1 w:1) // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Account (r:0 w:2) + // Storage: Nfts PendingSwapOf (r:0 w:1) fn buy_item() -> Weight { - Weight::from_ref_time(47_502_000 as u64) + Weight::from_ref_time(49_140_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } /// The range of component `n` is `[0, 10]`. 
fn pay_tips(n: u32, ) -> Weight { - Weight::from_ref_time(5_417_000 as u64) - // Standard Error: 32_526 - .saturating_add(Weight::from_ref_time(4_304_363 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(5_477_000 as u64) + // Standard Error: 33_188 + .saturating_add(Weight::from_ref_time(4_285_339 as u64).saturating_mul(n as u64)) + } + // Storage: Nfts Asset (r:2 w:0) + // Storage: Nfts PendingSwapOf (r:0 w:1) + fn create_swap() -> Weight { + Weight::from_ref_time(30_330_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + } + // Storage: Nfts PendingSwapOf (r:1 w:1) + // Storage: Nfts Asset (r:1 w:0) + fn cancel_swap() -> Weight { + Weight::from_ref_time(30_516_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + } + // Storage: Nfts Asset (r:2 w:2) + // Storage: Nfts PendingSwapOf (r:1 w:2) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Account (r:0 w:4) + // Storage: Nfts ItemPriceOf (r:0 w:2) + fn claim_swap() -> Weight { + Weight::from_ref_time(66_191_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(10 as u64)) } } From ef16fd2da6ec283919551228e253f8490f79c3e9 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 18 Oct 2022 16:59:53 +0300 Subject: [PATCH 014/101] [Uniques V2] Feature flags (#12367) * Basics * WIP: change the data format * Refactor * Remove redundant new() method * Rename settings * Enable tests * Chore * Change params order * Delete the config on collection removal * Chore * Remove redundant system features * Rename force_item_status to force_collection_status * Update node runtime * Chore * Remove thaw_collection * Chore * Connect collection.is_frozen to config * Allow to lock the collection in a new way * Move free_holding into settings * Connect collection's 
metadata locker to feature flags * DRY * Chore * Connect pallet level feature flags * Prepare tests for the new changes * Implement Item settings * Allow to lock the metadata or attributes of an item * Common -> Settings * Extract settings related code to a separate file * Move feature flag checks inside the do_* methods * Split settings.rs into parts * Extract repeated code into macro * Extract macros into their own file * Chore * Fix traits * Fix traits * Test SystemFeatures * Fix benchmarks * Add missing benchmark * Fix node/runtime/lib.rs * ".git/.scripts/bench-bot.sh" pallet dev pallet_nfts * Keep item's config on burn if it's not empty * Fix the merge artifacts * Fmt * Add SystemFeature::NoSwaps check * Rename SystemFeatures to PalletFeatures * Rename errors * Add docs * Change error message * Rework pallet features * Move macros * Change comments * Fmt * Refactor Incrementable * Use pub(crate) for do_* functions * Update comments * Refactor freeze and lock functions * Rework Collection config and Item confg api * Chore * Make clippy happy * Chore * Update comment * RequiredDeposit => DepositRequired * Address comments Co-authored-by: command-bot <> --- Cargo.lock | 5 +- bin/node/runtime/src/lib.rs | 6 + frame/nfts/Cargo.toml | 1 + frame/nfts/src/benchmarking.rs | 105 ++- frame/nfts/src/features/atomic_swap.rs | 15 +- frame/nfts/src/features/buy_sell.rs | 2 +- frame/nfts/src/features/lock.rs | 120 +++ frame/nfts/src/features/mod.rs | 2 + frame/nfts/src/features/settings.rs | 47 ++ frame/nfts/src/functions.rs | 81 +- frame/nfts/src/impl_nonfungibles.rs | 32 +- frame/nfts/src/lib.rs | 479 +++++++----- frame/nfts/src/macros.rs | 74 ++ frame/nfts/src/mock.rs | 7 +- frame/nfts/src/tests.rs | 734 ++++++++++++++---- frame/nfts/src/types.rs | 151 +++- frame/nfts/src/weights.rs | 352 +++++---- frame/support/src/traits/tokens.rs | 2 + .../src/traits/tokens/nonfungible_v2.rs | 204 +++++ .../src/traits/tokens/nonfungibles_v2.rs | 243 ++++++ 20 files changed, 2042 
insertions(+), 620 deletions(-) create mode 100644 frame/nfts/src/features/lock.rs create mode 100644 frame/nfts/src/features/settings.rs create mode 100644 frame/nfts/src/macros.rs create mode 100644 frame/support/src/traits/tokens/nonfungible_v2.rs create mode 100644 frame/support/src/traits/tokens/nonfungibles_v2.rs diff --git a/Cargo.lock b/Cargo.lock index 3151b061d461e..27e94a780add3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1895,9 +1895,9 @@ dependencies = [ [[package]] name = "enumflags2" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b3ab37dc79652c9d85f1f7b6070d77d321d2467f5fe7b00d6b7a86c57b092ae" +checksum = "e75d4cd21b95383444831539909fbb14b9dc3fdceb2a6f5d36577329a1f55ccb" dependencies = [ "enumflags2_derive", ] @@ -5935,6 +5935,7 @@ dependencies = [ name = "pallet-nfts" version = "4.0.0-dev" dependencies = [ + "enumflags2", "frame-benchmarking", "frame-support", "frame-system", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index d8152ceb770c2..6d8de0b47100b 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -53,6 +53,7 @@ use pallet_grandpa::{ fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, }; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use pallet_nfts::PalletFeatures; use pallet_session::historical::{self as pallet_session_historical}; pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdjustment}; use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; @@ -1504,6 +1505,10 @@ impl pallet_uniques::Config for Runtime { type Locker = (); } +parameter_types! 
{ + pub Features: PalletFeatures = PalletFeatures::all_enabled(); +} + impl pallet_nfts::Config for Runtime { type RuntimeEvent = RuntimeEvent; type CollectionId = u32; @@ -1521,6 +1526,7 @@ impl pallet_nfts::Config for Runtime { type ApprovalsLimit = ApprovalsLimit; type MaxTips = MaxTips; type MaxDeadlineDuration = MaxDeadlineDuration; + type Features = Features; type WeightInfo = pallet_nfts::weights::SubstrateWeight; #[cfg(feature = "runtime-benchmarks")] type Helper = (); diff --git a/frame/nfts/Cargo.toml b/frame/nfts/Cargo.toml index 5e1a3d79d3a65..f0b68ea702e3a 100644 --- a/frame/nfts/Cargo.toml +++ b/frame/nfts/Cargo.toml @@ -14,6 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +enumflags2 = { version = "0.7.5" } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index b35122cf919c7..c65430fd35108 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -24,6 +24,7 @@ use frame_benchmarking::{ account, benchmarks_instance_pallet, whitelist_account, whitelisted_caller, }; use frame_support::{ + assert_ok, dispatch::UnfilteredDispatchable, traits::{EnsureOrigin, Get}, BoundedVec, @@ -42,8 +43,11 @@ fn create_collection, I: 'static>( let caller_lookup = T::Lookup::unlookup(caller.clone()); let collection = T::Helper::collection(0); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - assert!(Nfts::::force_create(SystemOrigin::Root.into(), caller_lookup.clone(), false,) - .is_ok()); + assert_ok!(Nfts::::force_create( + SystemOrigin::Root.into(), + caller_lookup.clone(), + CollectionConfig::all_settings_enabled() + )); (collection, 
caller, caller_lookup) } @@ -53,13 +57,11 @@ fn add_collection_metadata, I: 'static>() -> (T::AccountId, Account whitelist_account!(caller); } let caller_lookup = T::Lookup::unlookup(caller.clone()); - assert!(Nfts::::set_collection_metadata( + assert_ok!(Nfts::::set_collection_metadata( SystemOrigin::Signed(caller.clone()).into(), T::Helper::collection(0), vec![0; T::StringLimit::get() as usize].try_into().unwrap(), - false, - ) - .is_ok()); + )); (caller, caller_lookup) } @@ -72,13 +74,13 @@ fn mint_item, I: 'static>( } let caller_lookup = T::Lookup::unlookup(caller.clone()); let item = T::Helper::item(index); - assert!(Nfts::::mint( + assert_ok!(Nfts::::mint( SystemOrigin::Signed(caller.clone()).into(), T::Helper::collection(0), item, caller_lookup.clone(), - ) - .is_ok()); + ItemConfig::all_settings_enabled(), + )); (item, caller, caller_lookup) } @@ -90,14 +92,12 @@ fn add_item_metadata, I: 'static>( whitelist_account!(caller); } let caller_lookup = T::Lookup::unlookup(caller.clone()); - assert!(Nfts::::set_metadata( + assert_ok!(Nfts::::set_metadata( SystemOrigin::Signed(caller.clone()).into(), T::Helper::collection(0), item, vec![0; T::StringLimit::get() as usize].try_into().unwrap(), - false, - ) - .is_ok()); + )); (caller, caller_lookup) } @@ -110,14 +110,13 @@ fn add_item_attribute, I: 'static>( } let caller_lookup = T::Lookup::unlookup(caller.clone()); let key: BoundedVec<_, _> = vec![0; T::KeyLimit::get() as usize].try_into().unwrap(); - assert!(Nfts::::set_attribute( + assert_ok!(Nfts::::set_attribute( SystemOrigin::Signed(caller.clone()).into(), T::Helper::collection(0), Some(item), key.clone(), vec![0; T::ValueLimit::get() as usize].try_into().unwrap(), - ) - .is_ok()); + )); (key, caller, caller_lookup) } @@ -137,7 +136,7 @@ benchmarks_instance_pallet! 
{ whitelist_account!(caller); let admin = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - let call = Call::::create { admin }; + let call = Call::::create { admin, config: CollectionConfig::all_settings_enabled() }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_last_event::(Event::Created { collection: T::Helper::collection(0), creator: caller.clone(), owner: caller }.into()); @@ -146,25 +145,19 @@ benchmarks_instance_pallet! { force_create { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - }: _(SystemOrigin::Root, caller_lookup, true) + }: _(SystemOrigin::Root, caller_lookup, CollectionConfig::all_settings_enabled()) verify { assert_last_event::(Event::ForceCreated { collection: T::Helper::collection(0), owner: caller }.into()); } destroy { let n in 0 .. 1_000; - let m in 0 .. 1_000; - let a in 0 .. 1_000; let (collection, caller, caller_lookup) = create_collection::(); add_collection_metadata::(); for i in 0..n { mint_item::(i as u16); - } - for i in 0..m { add_item_metadata::(T::Helper::item(i as u16)); - } - for i in 0..a { add_item_attribute::(T::Helper::item(i as u16)); } let witness = Collection::::get(collection).unwrap().destroy_witness(); @@ -176,7 +169,7 @@ benchmarks_instance_pallet! { mint { let (collection, caller, caller_lookup) = create_collection::(); let item = T::Helper::item(0); - }: _(SystemOrigin::Signed(caller.clone()), collection, item, caller_lookup) + }: _(SystemOrigin::Signed(caller.clone()), collection, item, caller_lookup, ItemConfig::all_settings_enabled()) verify { assert_last_event::(Event::Issued { collection, item, owner: caller }.into()); } @@ -204,56 +197,51 @@ benchmarks_instance_pallet! { let i in 0 .. 
5_000; let (collection, caller, caller_lookup) = create_collection::(); let items = (0..i).map(|x| mint_item::(x as u16).0).collect::>(); - Nfts::::force_item_status( + Nfts::::force_collection_status( SystemOrigin::Root.into(), collection, caller_lookup.clone(), caller_lookup.clone(), caller_lookup.clone(), caller_lookup, - true, - false, + CollectionConfig(CollectionSetting::DepositRequired.into()), )?; }: _(SystemOrigin::Signed(caller.clone()), collection, items.clone()) verify { assert_last_event::(Event::Redeposited { collection, successful_items: items }.into()); } - freeze { + lock_item_transfer { let (collection, caller, caller_lookup) = create_collection::(); let (item, ..) = mint_item::(0); }: _(SystemOrigin::Signed(caller.clone()), T::Helper::collection(0), T::Helper::item(0)) verify { - assert_last_event::(Event::Frozen { collection: T::Helper::collection(0), item: T::Helper::item(0) }.into()); + assert_last_event::(Event::ItemTransferLocked { collection: T::Helper::collection(0), item: T::Helper::item(0) }.into()); } - thaw { + unlock_item_transfer { let (collection, caller, caller_lookup) = create_collection::(); let (item, ..) 
= mint_item::(0); - Nfts::::freeze( + Nfts::::lock_item_transfer( SystemOrigin::Signed(caller.clone()).into(), collection, item, )?; }: _(SystemOrigin::Signed(caller.clone()), collection, item) verify { - assert_last_event::(Event::Thawed { collection, item }.into()); + assert_last_event::(Event::ItemTransferUnlocked { collection, item }.into()); } - freeze_collection { + lock_collection { let (collection, caller, caller_lookup) = create_collection::(); - }: _(SystemOrigin::Signed(caller.clone()), collection) + let lock_config = CollectionConfig( + CollectionSetting::TransferableItems | + CollectionSetting::UnlockedMetadata | + CollectionSetting::UnlockedAttributes, + ); + }: _(SystemOrigin::Signed(caller.clone()), collection, lock_config) verify { - assert_last_event::(Event::CollectionFrozen { collection }.into()); - } - - thaw_collection { - let (collection, caller, caller_lookup) = create_collection::(); - let origin = SystemOrigin::Signed(caller.clone()).into(); - Nfts::::freeze_collection(origin, collection)?; - }: _(SystemOrigin::Signed(caller.clone()), collection) - verify { - assert_last_event::(Event::CollectionThawed { collection }.into()); + assert_last_event::(Event::CollectionLocked { collection }.into()); } transfer_ownership { @@ -283,21 +271,30 @@ benchmarks_instance_pallet! { }.into()); } - force_item_status { + force_collection_status { let (collection, caller, caller_lookup) = create_collection::(); let origin = T::ForceOrigin::successful_origin(); - let call = Call::::force_item_status { + let call = Call::::force_collection_status { collection, owner: caller_lookup.clone(), issuer: caller_lookup.clone(), admin: caller_lookup.clone(), freezer: caller_lookup, - free_holding: true, - is_frozen: false, + config: CollectionConfig(CollectionSetting::DepositRequired.into()), }; }: { call.dispatch_bypass_filter(origin)? 
} verify { - assert_last_event::(Event::ItemStatusChanged { collection }.into()); + assert_last_event::(Event::CollectionStatusChanged { collection }.into()); + } + + lock_item_properties { + let (collection, caller, caller_lookup) = create_collection::(); + let (item, ..) = mint_item::(0); + let lock_metadata = true; + let lock_attributes = true; + }: _(SystemOrigin::Signed(caller), collection, item, lock_metadata, lock_attributes) + verify { + assert_last_event::(Event::ItemPropertiesLocked { collection, item, lock_metadata, lock_attributes }.into()); } set_attribute { @@ -327,9 +324,9 @@ benchmarks_instance_pallet! { let (collection, caller, _) = create_collection::(); let (item, ..) = mint_item::(0); - }: _(SystemOrigin::Signed(caller), collection, item, data.clone(), false) + }: _(SystemOrigin::Signed(caller), collection, item, data.clone()) verify { - assert_last_event::(Event::MetadataSet { collection, item, data, is_frozen: false }.into()); + assert_last_event::(Event::MetadataSet { collection, item, data }.into()); } clear_metadata { @@ -345,9 +342,9 @@ benchmarks_instance_pallet! 
{ let data: BoundedVec<_, _> = vec![0u8; T::StringLimit::get() as usize].try_into().unwrap(); let (collection, caller, _) = create_collection::(); - }: _(SystemOrigin::Signed(caller), collection, data.clone(), false) + }: _(SystemOrigin::Signed(caller), collection, data.clone()) verify { - assert_last_event::(Event::CollectionMetadataSet { collection, data, is_frozen: false }.into()); + assert_last_event::(Event::CollectionMetadataSet { collection, data }.into()); } clear_collection_metadata { diff --git a/frame/nfts/src/features/atomic_swap.rs b/frame/nfts/src/features/atomic_swap.rs index 116da57477f4e..bacaccdaedcbf 100644 --- a/frame/nfts/src/features/atomic_swap.rs +++ b/frame/nfts/src/features/atomic_swap.rs @@ -22,7 +22,7 @@ use frame_support::{ }; impl, I: 'static> Pallet { - pub fn do_create_swap( + pub(crate) fn do_create_swap( caller: T::AccountId, offered_collection_id: T::CollectionId, offered_item_id: T::ItemId, @@ -31,6 +31,10 @@ impl, I: 'static> Pallet { maybe_price: Option>>, duration: ::BlockNumber, ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Swaps), + Error::::MethodDisabled + ); ensure!(duration <= T::MaxDeadlineDuration::get(), Error::::WrongDuration); let item = Item::::get(&offered_collection_id, &offered_item_id) @@ -74,7 +78,7 @@ impl, I: 'static> Pallet { Ok(()) } - pub fn do_cancel_swap( + pub(crate) fn do_cancel_swap( caller: T::AccountId, offered_collection_id: T::CollectionId, offered_item_id: T::ItemId, @@ -103,7 +107,7 @@ impl, I: 'static> Pallet { Ok(()) } - pub fn do_claim_swap( + pub(crate) fn do_claim_swap( caller: T::AccountId, send_collection_id: T::CollectionId, send_item_id: T::ItemId, @@ -111,6 +115,11 @@ impl, I: 'static> Pallet { receive_item_id: T::ItemId, witness_price: Option>>, ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Swaps), + Error::::MethodDisabled + ); + let send_item = Item::::get(&send_collection_id, &send_item_id) 
.ok_or(Error::::UnknownItem)?; let receive_item = Item::::get(&receive_collection_id, &receive_item_id) diff --git a/frame/nfts/src/features/buy_sell.rs b/frame/nfts/src/features/buy_sell.rs index 295d7fadfa8e6..c1e29057af9c9 100644 --- a/frame/nfts/src/features/buy_sell.rs +++ b/frame/nfts/src/features/buy_sell.rs @@ -22,7 +22,7 @@ use frame_support::{ }; impl, I: 'static> Pallet { - pub fn do_pay_tips( + pub(crate) fn do_pay_tips( sender: T::AccountId, tips: BoundedVec, T::MaxTips>, ) -> DispatchResult { diff --git a/frame/nfts/src/features/lock.rs b/frame/nfts/src/features/lock.rs new file mode 100644 index 0000000000000..0a5fecc1d6224 --- /dev/null +++ b/frame/nfts/src/features/lock.rs @@ -0,0 +1,120 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub(crate) fn do_lock_collection( + origin: T::AccountId, + collection: T::CollectionId, + lock_config: CollectionConfig, + ) -> DispatchResult { + let details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + ensure!(origin == details.freezer, Error::::NoPermission); + + CollectionConfigOf::::try_mutate(collection, |maybe_config| { + let config = maybe_config.as_mut().ok_or(Error::::NoConfig)?; + + if lock_config.has_disabled_setting(CollectionSetting::TransferableItems) { + config.disable_setting(CollectionSetting::TransferableItems); + } + if lock_config.has_disabled_setting(CollectionSetting::UnlockedMetadata) { + config.disable_setting(CollectionSetting::UnlockedMetadata); + } + if lock_config.has_disabled_setting(CollectionSetting::UnlockedAttributes) { + config.disable_setting(CollectionSetting::UnlockedAttributes); + } + + Self::deposit_event(Event::::CollectionLocked { collection }); + Ok(()) + }) + } + + pub(crate) fn do_lock_item_transfer( + origin: T::AccountId, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + ensure!(collection_details.freezer == origin, Error::::NoPermission); + + let mut config = Self::get_item_config(&collection, &item)?; + if !config.has_disabled_setting(ItemSetting::Transferable) { + config.disable_setting(ItemSetting::Transferable); + } + ItemConfigOf::::insert(&collection, &item, config); + + Self::deposit_event(Event::::ItemTransferLocked { collection, item }); + Ok(()) + } + + pub(crate) fn do_unlock_item_transfer( + origin: T::AccountId, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + ensure!(collection_details.freezer == origin, Error::::NoPermission); + + let mut config = 
Self::get_item_config(&collection, &item)?; + if config.has_disabled_setting(ItemSetting::Transferable) { + config.enable_setting(ItemSetting::Transferable); + } + ItemConfigOf::::insert(&collection, &item, config); + + Self::deposit_event(Event::::ItemTransferUnlocked { collection, item }); + Ok(()) + } + + pub(crate) fn do_lock_item_properties( + maybe_check_owner: Option, + collection: T::CollectionId, + item: T::ItemId, + lock_metadata: bool, + lock_attributes: bool, + ) -> DispatchResult { + let collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + } + + ItemConfigOf::::try_mutate(collection, item, |maybe_config| { + let config = maybe_config.as_mut().ok_or(Error::::UnknownItem)?; + + if lock_metadata { + config.disable_setting(ItemSetting::UnlockedMetadata); + } + if lock_attributes { + config.disable_setting(ItemSetting::UnlockedAttributes); + } + + Self::deposit_event(Event::::ItemPropertiesLocked { + collection, + item, + lock_metadata, + lock_attributes, + }); + Ok(()) + }) + } +} diff --git a/frame/nfts/src/features/mod.rs b/frame/nfts/src/features/mod.rs index 24b58dee5f3a0..47e5816bc953c 100644 --- a/frame/nfts/src/features/mod.rs +++ b/frame/nfts/src/features/mod.rs @@ -17,3 +17,5 @@ pub mod atomic_swap; pub mod buy_sell; +pub mod lock; +pub mod settings; diff --git a/frame/nfts/src/features/settings.rs b/frame/nfts/src/features/settings.rs new file mode 100644 index 0000000000000..2596d360d8dcd --- /dev/null +++ b/frame/nfts/src/features/settings.rs @@ -0,0 +1,47 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use frame_support::pallet_prelude::*; + +/// The helper methods bellow allow to read and validate different +/// collection/item/pallet settings. +/// For example, those settings allow to disable NFTs trading on a pallet level, or for a particular +/// collection, or for a specific item. +impl, I: 'static> Pallet { + pub(crate) fn get_collection_config( + collection_id: &T::CollectionId, + ) -> Result { + let config = CollectionConfigOf::::get(&collection_id) + .ok_or(Error::::UnknownCollection)?; + Ok(config) + } + + pub(crate) fn get_item_config( + collection_id: &T::CollectionId, + item_id: &T::ItemId, + ) -> Result { + let config = ItemConfigOf::::get(&collection_id, &item_id) + .ok_or(Error::::UnknownItem)?; + Ok(config) + } + + pub(crate) fn is_pallet_feature_enabled(feature: PalletFeature) -> bool { + let features = T::Features::get(); + return features.is_enabled(feature) + } +} diff --git a/frame/nfts/src/functions.rs b/frame/nfts/src/functions.rs index 17be672834bf2..275e3668d7a20 100644 --- a/frame/nfts/src/functions.rs +++ b/frame/nfts/src/functions.rs @@ -36,12 +36,22 @@ impl, I: 'static> Pallet { ) -> DispatchResult { let collection_details = Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - ensure!(!collection_details.is_frozen, Error::::Frozen); - ensure!(!T::Locker::is_locked(collection, item), Error::::Locked); + ensure!(!T::Locker::is_locked(collection, item), Error::::ItemLocked); + + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + 
collection_config.is_setting_enabled(CollectionSetting::TransferableItems), + Error::::ItemsNotTransferable + ); + + let item_config = Self::get_item_config(&collection, &item)?; + ensure!( + item_config.is_setting_enabled(ItemSetting::Transferable), + Error::::ItemLocked + ); let mut details = - Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; - ensure!(!details.is_frozen, Error::::Frozen); + Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; with_details(&collection_details, &mut details)?; Account::::remove((&details.owner, &collection, &item)); @@ -71,11 +81,11 @@ impl, I: 'static> Pallet { collection: T::CollectionId, owner: T::AccountId, admin: T::AccountId, + config: CollectionConfig, deposit: DepositBalanceOf, - free_holding: bool, event: Event, ) -> DispatchResult { - ensure!(!Collection::::contains_key(collection), Error::::InUse); + ensure!(!Collection::::contains_key(collection), Error::::CollectionIdInUse); T::Currency::reserve(&owner, deposit)?; @@ -87,16 +97,15 @@ impl, I: 'static> Pallet { admin: admin.clone(), freezer: admin, total_deposit: deposit, - free_holding, items: 0, item_metadatas: 0, attributes: 0, - is_frozen: false, }, ); let next_id = collection.increment(); + CollectionConfigOf::::insert(&collection, config); CollectionAccount::::insert(&owner, &collection, ()); NextCollectionId::::set(Some(next_id)); @@ -138,6 +147,8 @@ impl, I: 'static> Pallet { CollectionAccount::::remove(&collection_details.owner, &collection); T::Currency::unreserve(&collection_details.owner, collection_details.total_deposit); CollectionMaxSupply::::remove(&collection); + CollectionConfigOf::::remove(&collection); + let _ = ItemConfigOf::::clear_prefix(&collection, witness.items, None); Self::deposit_event(Event::Destroyed { collection }); @@ -153,6 +164,7 @@ impl, I: 'static> Pallet { collection: T::CollectionId, item: T::ItemId, owner: T::AccountId, + config: ItemConfig, with_details: impl FnOnce(&CollectionDetailsFor) -> 
DispatchResult, ) -> DispatchResult { ensure!(!Item::::contains_key(collection, item), Error::::AlreadyExists); @@ -173,21 +185,27 @@ impl, I: 'static> Pallet { collection_details.items.checked_add(1).ok_or(ArithmeticError::Overflow)?; collection_details.items = items; - let deposit = match collection_details.free_holding { - true => Zero::zero(), - false => T::ItemDeposit::get(), + let collection_config = Self::get_collection_config(&collection)?; + let deposit = match collection_config + .is_setting_enabled(CollectionSetting::DepositRequired) + { + true => T::ItemDeposit::get(), + false => Zero::zero(), }; T::Currency::reserve(&collection_details.owner, deposit)?; collection_details.total_deposit += deposit; let owner = owner.clone(); Account::::insert((&owner, &collection, &item), ()); - let details = ItemDetails { - owner, - approvals: ApprovalsOf::::default(), - is_frozen: false, - deposit, - }; + + if let Ok(existing_config) = ItemConfigOf::::try_get(&collection, &item) { + ensure!(existing_config == config, Error::::InconsistentItemConfig); + } else { + ItemConfigOf::::insert(&collection, &item, config); + } + + let details = + ItemDetails { owner, approvals: ApprovalsOf::::default(), deposit }; Item::::insert(&collection, &item, details); Ok(()) }, @@ -224,6 +242,13 @@ impl, I: 'static> Pallet { ItemPriceOf::::remove(&collection, &item); PendingSwapOf::::remove(&collection, &item); + // NOTE: if item's settings are not empty (e.g. 
item's metadata is locked) + // then we keep the record and don't remove it + let config = Self::get_item_config(&collection, &item)?; + if !config.has_disabled_settings() { + ItemConfigOf::::remove(&collection, &item); + } + Self::deposit_event(Event::Burned { collection, item, owner }); Ok(()) } @@ -235,9 +260,26 @@ impl, I: 'static> Pallet { price: Option>, whitelisted_buyer: Option, ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Trading), + Error::::MethodDisabled + ); + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; ensure!(details.owner == sender, Error::::NoPermission); + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config.is_setting_enabled(CollectionSetting::TransferableItems), + Error::::ItemsNotTransferable + ); + + let item_config = Self::get_item_config(&collection, &item)?; + ensure!( + item_config.is_setting_enabled(ItemSetting::Transferable), + Error::::ItemLocked + ); + if let Some(ref price) = price { ItemPriceOf::::insert(&collection, &item, (price, whitelisted_buyer.clone())); Self::deposit_event(Event::ItemPriceSet { @@ -260,6 +302,11 @@ impl, I: 'static> Pallet { buyer: T::AccountId, bid_price: ItemPrice, ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Trading), + Error::::MethodDisabled + ); + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; ensure!(details.owner != buyer, Error::::NoPermission); diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index cead6f562ab58..8a7c79fc0c14f 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -19,7 +19,8 @@ use super::*; use frame_support::{ - traits::{tokens::nonfungibles::*, Get}, + ensure, + traits::{tokens::nonfungibles_v2::*, Get}, BoundedSlice, }; use sp_runtime::{DispatchError, DispatchResult}; @@ -78,26 +79,40 @@ impl, I: 'static> Inspect<::AccountId> 
for Palle /// /// Default implementation is that all items are transferable. fn can_transfer(collection: &Self::CollectionId, item: &Self::ItemId) -> bool { - match (Collection::::get(collection), Item::::get(collection, item)) { - (Some(cd), Some(id)) if !cd.is_frozen && !id.is_frozen => true, + match ( + CollectionConfigOf::::get(collection), + ItemConfigOf::::get(collection, item), + ) { + (Some(cc), Some(ic)) + if cc.is_setting_enabled(CollectionSetting::TransferableItems) && + ic.is_setting_enabled(ItemSetting::Transferable) => + true, _ => false, } } } -impl, I: 'static> Create<::AccountId> for Pallet { +impl, I: 'static> Create<::AccountId, CollectionConfig> + for Pallet +{ /// Create a `collection` of nonfungible items to be owned by `who` and managed by `admin`. fn create_collection( collection: &Self::CollectionId, who: &T::AccountId, admin: &T::AccountId, + config: &CollectionConfig, ) -> DispatchResult { + // DepositRequired can be disabled by calling the force_create() only + ensure!( + !config.has_disabled_setting(CollectionSetting::DepositRequired), + Error::::WrongSetting + ); Self::do_create_collection( *collection, who.clone(), admin.clone(), + *config, T::CollectionDeposit::get(), - false, Event::Created { collection: *collection, creator: who.clone(), owner: admin.clone() }, ) } @@ -119,13 +134,16 @@ impl, I: 'static> Destroy<::AccountId> for Palle } } -impl, I: 'static> Mutate<::AccountId> for Pallet { +impl, I: 'static> Mutate<::AccountId, ItemSettings> + for Pallet +{ fn mint_into( collection: &Self::CollectionId, item: &Self::ItemId, who: &T::AccountId, + settings: &ItemSettings, ) -> DispatchResult { - Self::do_mint(*collection, *item, who.clone(), |_| Ok(())) + Self::do_mint(*collection, *item, who.clone(), ItemConfig(*settings), |_| Ok(())) } fn burn( diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 7e9b2a42f7e14..bfba0c1ea3330 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -40,6 +40,7 @@ mod functions; 
mod impl_nonfungibles; mod types; +pub mod macros; pub mod weights; use codec::{Decode, Encode}; @@ -62,29 +63,6 @@ pub use weights::WeightInfo; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -pub trait Incrementable { - fn increment(&self) -> Self; - fn initial_value() -> Self; -} - -macro_rules! impl_incrementable { - ($($type:ty),+) => { - $( - impl Incrementable for $type { - fn increment(&self) -> Self { - self.saturating_add(1) - } - - fn initial_value() -> Self { - 0 - } - } - )+ - }; -} - -impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); - #[frame_support::pallet] pub mod pallet { use super::*; @@ -186,6 +164,10 @@ pub mod pallet { #[pallet::constant] type MaxDeadlineDuration: Get<::BlockNumber>; + /// Disables some of pallet's features. + #[pallet::constant] + type Features: Get; + #[cfg(feature = "runtime-benchmarks")] /// A set of helper functions for benchmarking. type Helper: BenchmarkHelper; @@ -194,15 +176,9 @@ pub mod pallet { type WeightInfo: WeightInfo; } - pub type ApprovalsOf = BoundedBTreeMap< - ::AccountId, - Option<::BlockNumber>, - >::ApprovalsLimit, - >; - + /// Details of a collection. #[pallet::storage] #[pallet::storage_prefix = "Class"] - /// Details of a collection. pub(super) type Collection, I: 'static = ()> = StorageMap< _, Blake2_128Concat, @@ -210,14 +186,14 @@ pub mod pallet { CollectionDetails>, >; - #[pallet::storage] /// The collection, if any, of which an account is willing to take ownership. + #[pallet::storage] pub(super) type OwnershipAcceptance, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::AccountId, T::CollectionId>; - #[pallet::storage] /// The items held by any given account; set out this way so that items owned by a single /// account can be enumerated. 
+ #[pallet::storage] pub(super) type Account, I: 'static = ()> = StorageNMap< _, ( @@ -229,10 +205,10 @@ pub mod pallet { OptionQuery, >; - #[pallet::storage] - #[pallet::storage_prefix = "ClassAccount"] /// The collections owned by any given account; set out this way so that collections owned by /// a single account can be enumerated. + #[pallet::storage] + #[pallet::storage_prefix = "ClassAccount"] pub(super) type CollectionAccount, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, @@ -243,9 +219,9 @@ pub mod pallet { OptionQuery, >; + /// The items in existence and their ownership details. #[pallet::storage] #[pallet::storage_prefix = "Asset"] - /// The items in existence and their ownership details. pub(super) type Item, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, @@ -256,9 +232,9 @@ pub mod pallet { OptionQuery, >; + /// Metadata of a collection. #[pallet::storage] #[pallet::storage_prefix = "ClassMetadataOf"] - /// Metadata of a collection. pub(super) type CollectionMetadataOf, I: 'static = ()> = StorageMap< _, Blake2_128Concat, @@ -267,9 +243,9 @@ pub mod pallet { OptionQuery, >; + /// Metadata of an item. #[pallet::storage] #[pallet::storage_prefix = "InstanceMetadataOf"] - /// Metadata of an item. pub(super) type ItemMetadataOf, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, @@ -280,8 +256,8 @@ pub mod pallet { OptionQuery, >; - #[pallet::storage] /// Attributes of a collection. + #[pallet::storage] pub(super) type Attribute, I: 'static = ()> = StorageNMap< _, ( @@ -293,8 +269,8 @@ pub mod pallet { OptionQuery, >; - #[pallet::storage] /// Price of an asset instance. + #[pallet::storage] pub(super) type ItemPriceOf, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, @@ -305,19 +281,19 @@ pub mod pallet { OptionQuery, >; - #[pallet::storage] /// Keeps track of the number of items a collection might have. 
+ #[pallet::storage] pub(super) type CollectionMaxSupply, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::CollectionId, u32, OptionQuery>; - #[pallet::storage] /// Stores the `CollectionId` that is going to be used for the next collection. /// This gets incremented by 1 whenever a new collection is created. + #[pallet::storage] pub(super) type NextCollectionId, I: 'static = ()> = StorageValue<_, T::CollectionId, OptionQuery>; - #[pallet::storage] /// Handles all the pending swaps. + #[pallet::storage] pub(super) type PendingSwapOf, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, @@ -333,6 +309,23 @@ pub mod pallet { OptionQuery, >; + /// Config of a collection. + #[pallet::storage] + pub(super) type CollectionConfigOf, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, T::CollectionId, CollectionConfig, OptionQuery>; + + /// Config of an item. + #[pallet::storage] + pub(super) type ItemConfigOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + ItemConfig, + OptionQuery, + >; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { @@ -353,14 +346,19 @@ pub mod pallet { }, /// An `item` was destroyed. Burned { collection: T::CollectionId, item: T::ItemId, owner: T::AccountId }, - /// Some `item` was frozen. - Frozen { collection: T::CollectionId, item: T::ItemId }, - /// Some `item` was thawed. - Thawed { collection: T::CollectionId, item: T::ItemId }, - /// Some `collection` was frozen. - CollectionFrozen { collection: T::CollectionId }, - /// Some `collection` was thawed. - CollectionThawed { collection: T::CollectionId }, + /// An `item` became non-transferable. + ItemTransferLocked { collection: T::CollectionId, item: T::ItemId }, + /// An `item` became transferable. + ItemTransferUnlocked { collection: T::CollectionId, item: T::ItemId }, + /// `item` metadata or attributes were locked. 
+ ItemPropertiesLocked { + collection: T::CollectionId, + item: T::ItemId, + lock_metadata: bool, + lock_attributes: bool, + }, + /// Some `collection` was locked. + CollectionLocked { collection: T::CollectionId }, /// The owner changed. OwnerChanged { collection: T::CollectionId, new_owner: T::AccountId }, /// The management team changed. @@ -390,13 +388,9 @@ pub mod pallet { /// All approvals of an item got cancelled. AllApprovalsCancelled { collection: T::CollectionId, item: T::ItemId, owner: T::AccountId }, /// A `collection` has had its attributes changed by the `Force` origin. - ItemStatusChanged { collection: T::CollectionId }, + CollectionStatusChanged { collection: T::CollectionId }, /// New metadata has been set for a `collection`. - CollectionMetadataSet { - collection: T::CollectionId, - data: BoundedVec, - is_frozen: bool, - }, + CollectionMetadataSet { collection: T::CollectionId, data: BoundedVec }, /// Metadata has been cleared for a `collection`. CollectionMetadataCleared { collection: T::CollectionId }, /// New metadata has been set for an item. @@ -404,7 +398,6 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, data: BoundedVec, - is_frozen: bool, }, /// Metadata has been cleared for an item. MetadataCleared { collection: T::CollectionId, item: T::ItemId }, @@ -429,6 +422,8 @@ pub mod pallet { CollectionMaxSupplySet { collection: T::CollectionId, max_supply: u32 }, /// Event gets emmited when the `NextCollectionId` gets incremented. NextCollectionIdIncremented { next_id: T::CollectionId }, + /// The config of a collection has change. + CollectionConfigChanged { id: T::CollectionId }, /// The price was set for the instance. ItemPriceSet { collection: T::CollectionId, @@ -497,22 +492,30 @@ pub mod pallet { ApprovalExpired, /// The owner turned out to be different to what was expected. WrongOwner, - /// Invalid witness data given. + /// The witness data given does not match the current state of the chain. 
BadWitness, - /// The item ID is already taken. - InUse, - /// The item or collection is frozen. - Frozen, + /// Collection ID is already taken. + CollectionIdInUse, + /// Items within that collection are non-transferable. + ItemsNotTransferable, /// The provided account is not a delegate. NotDelegate, /// The delegate turned out to be different to what was expected. WrongDelegate, /// No approval exists that would allow the transfer. Unapproved, - /// The named owner has not signed ownership of the collection is acceptable. + /// The named owner has not signed ownership acceptance of the collection. Unaccepted, - /// The item is locked. - Locked, + /// The item is locked (non-transferable). + ItemLocked, + /// Item's attributes are locked. + LockedItemAttributes, + /// Collection's attributes are locked. + LockedCollectionAttributes, + /// Item's metadata is locked. + LockedItemMetadata, + /// Collection's metadata is locked. + LockedCollectionMetadata, /// All items have been minted. MaxSupplyReached, /// The max supply has already been set. @@ -533,6 +536,14 @@ pub mod pallet { DeadlineExpired, /// The duration provided should be less or equal to MaxDeadlineDuration. WrongDuration, + /// The method is disabled by system settings. + MethodDisabled, + /// The provided is setting can't be set. + WrongSetting, + /// Item's config already exists and should be equal to the provided one. + InconsistentItemConfig, + /// Config for a collection or an item can't be found. 
+ NoConfig, } impl, I: 'static> Pallet { @@ -565,19 +576,29 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::create())] - pub fn create(origin: OriginFor, admin: AccountIdLookupOf) -> DispatchResult { + pub fn create( + origin: OriginFor, + admin: AccountIdLookupOf, + config: CollectionConfig, + ) -> DispatchResult { let collection = NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()); let owner = T::CreateOrigin::ensure_origin(origin, &collection)?; let admin = T::Lookup::lookup(admin)?; + // DepositRequired can be disabled by calling the force_create() only + ensure!( + !config.has_disabled_setting(CollectionSetting::DepositRequired), + Error::::WrongSetting + ); + Self::do_create_collection( collection, owner.clone(), admin.clone(), + config, T::CollectionDeposit::get(), - false, Event::Created { collection, creator: owner, owner: admin }, ) } @@ -602,7 +623,7 @@ pub mod pallet { pub fn force_create( origin: OriginFor, owner: AccountIdLookupOf, - free_holding: bool, + config: CollectionConfig, ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; let owner = T::Lookup::lookup(owner)?; @@ -614,8 +635,8 @@ pub mod pallet { collection, owner.clone(), owner.clone(), + config, Zero::zero(), - free_holding, Event::ForceCreated { collection, owner }, ) } @@ -676,11 +697,12 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, owner: AccountIdLookupOf, + config: ItemConfig, ) -> DispatchResult { let origin = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; - Self::do_mint(collection, item, owner, |collection_details| { + Self::do_mint(collection, item, owner, config, |collection_details| { ensure!(collection_details.issuer == origin, Error::::NoPermission); Ok(()) }) @@ -758,7 +780,7 @@ pub mod pallet { }) } - /// Reevaluate the deposits on some items. + /// Re-evaluate the deposits on some items. /// /// Origin must be Signed and the sender should be the Owner of the `collection`. 
/// @@ -786,9 +808,11 @@ pub mod pallet { let mut collection_details = Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; ensure!(collection_details.owner == origin, Error::::NoPermission); - let deposit = match collection_details.free_holding { - true => Zero::zero(), - false => T::ItemDeposit::get(), + + let config = Self::get_collection_config(&collection)?; + let deposit = match config.is_setting_enabled(CollectionSetting::DepositRequired) { + true => T::ItemDeposit::get(), + false => Zero::zero(), }; let mut successful = Vec::with_capacity(items.len()); @@ -829,116 +853,61 @@ pub mod pallet { /// /// Origin must be Signed and the sender should be the Freezer of the `collection`. /// - /// - `collection`: The collection of the item to be frozen. - /// - `item`: The item of the item to be frozen. + /// - `collection`: The collection of the item to be changed. + /// - `item`: The item to become non-transferable. /// - /// Emits `Frozen`. + /// Emits `ItemTransferLocked`. /// /// Weight: `O(1)` - #[pallet::weight(T::WeightInfo::freeze())] - pub fn freeze( + #[pallet::weight(T::WeightInfo::lock_item_transfer())] + pub fn lock_item_transfer( origin: OriginFor, collection: T::CollectionId, item: T::ItemId, ) -> DispatchResult { let origin = ensure_signed(origin)?; - - let mut details = - Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; - let collection_details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - ensure!(collection_details.freezer == origin, Error::::NoPermission); - - details.is_frozen = true; - Item::::insert(&collection, &item, &details); - - Self::deposit_event(Event::::Frozen { collection, item }); - Ok(()) + Self::do_lock_item_transfer(origin, collection, item) } /// Re-allow unprivileged transfer of an item. /// /// Origin must be Signed and the sender should be the Freezer of the `collection`. /// - /// - `collection`: The collection of the item to be thawed. 
- /// - `item`: The item of the item to be thawed. + /// - `collection`: The collection of the item to be changed. + /// - `item`: The item to become transferable. /// - /// Emits `Thawed`. + /// Emits `ItemTransferUnlocked`. /// /// Weight: `O(1)` - #[pallet::weight(T::WeightInfo::thaw())] - pub fn thaw( + #[pallet::weight(T::WeightInfo::unlock_item_transfer())] + pub fn unlock_item_transfer( origin: OriginFor, collection: T::CollectionId, item: T::ItemId, ) -> DispatchResult { let origin = ensure_signed(origin)?; - - let mut details = - Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; - let collection_details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - ensure!(collection_details.admin == origin, Error::::NoPermission); - - details.is_frozen = false; - Item::::insert(&collection, &item, &details); - - Self::deposit_event(Event::::Thawed { collection, item }); - Ok(()) + Self::do_unlock_item_transfer(origin, collection, item) } - /// Disallow further unprivileged transfers for a whole collection. + /// Disallows specified settings for the whole collection. /// /// Origin must be Signed and the sender should be the Freezer of the `collection`. /// - /// - `collection`: The collection to be frozen. - /// - /// Emits `CollectionFrozen`. + /// - `collection`: The collection to be locked. + /// - `lock_config`: The config with the settings to be locked. 
/// - /// Weight: `O(1)` - #[pallet::weight(T::WeightInfo::freeze_collection())] - pub fn freeze_collection( - origin: OriginFor, - collection: T::CollectionId, - ) -> DispatchResult { - let origin = ensure_signed(origin)?; - - Collection::::try_mutate(collection, |maybe_details| { - let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; - ensure!(origin == details.freezer, Error::::NoPermission); - - details.is_frozen = true; - - Self::deposit_event(Event::::CollectionFrozen { collection }); - Ok(()) - }) - } - - /// Re-allow unprivileged transfers for a whole collection. - /// - /// Origin must be Signed and the sender should be the Admin of the `collection`. - /// - /// - `collection`: The collection to be thawed. - /// - /// Emits `CollectionThawed`. + /// Note: it's possible to only lock(set) the setting, but not to unset it. + /// Emits `CollectionLocked`. /// /// Weight: `O(1)` - #[pallet::weight(T::WeightInfo::thaw_collection())] - pub fn thaw_collection( + #[pallet::weight(T::WeightInfo::lock_collection())] + pub fn lock_collection( origin: OriginFor, collection: T::CollectionId, + lock_config: CollectionConfig, ) -> DispatchResult { let origin = ensure_signed(origin)?; - - Collection::::try_mutate(collection, |maybe_details| { - let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; - ensure!(origin == details.admin, Error::::NoPermission); - - details.is_frozen = false; - - Self::deposit_event(Event::::CollectionThawed { collection }); - Ok(()) - }) + Self::do_lock_collection(origin, collection, lock_config) } /// Change the Owner of a collection. 
@@ -1047,6 +1016,10 @@ pub mod pallet { delegate: AccountIdLookupOf, maybe_deadline: Option<::BlockNumber>, ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Approvals), + Error::::MethodDisabled + ); let maybe_check: Option = T::ForceOrigin::try_origin(origin) .map(|_| None) .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; @@ -1058,6 +1031,12 @@ pub mod pallet { let mut details = Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config.is_setting_enabled(CollectionSetting::TransferableItems), + Error::::ItemsNotTransferable + ); + if let Some(check) = maybe_check { let permitted = check == collection_details.admin || check == details.owner; ensure!(permitted, Error::::NoPermission); @@ -1189,54 +1168,85 @@ pub mod pallet { Ok(()) } - /// Alter the attributes of a given item. + /// Alter the attributes of a given collection. /// /// Origin must be `ForceOrigin`. /// - /// - `collection`: The identifier of the item. - /// - `owner`: The new Owner of this item. - /// - `issuer`: The new Issuer of this item. - /// - `admin`: The new Admin of this item. - /// - `freezer`: The new Freezer of this item. - /// - `free_holding`: Whether a deposit is taken for holding an item of this collection. - /// - `is_frozen`: Whether this collection is frozen except for permissioned/admin - /// instructions. + /// - `collection`: The identifier of the collection. + /// - `owner`: The new Owner of this collection. + /// - `issuer`: The new Issuer of this collection. + /// - `admin`: The new Admin of this collection. + /// - `freezer`: The new Freezer of this collection. + /// - `config`: Collection's config. /// - /// Emits `ItemStatusChanged` with the identity of the item. + /// Emits `CollectionStatusChanged` with the identity of the item. 
///
 /// Weight: `O(1)`
- #[pallet::weight(T::WeightInfo::force_item_status())]
- pub fn force_item_status(
+ #[pallet::weight(T::WeightInfo::force_collection_status())]
+ pub fn force_collection_status(
 origin: OriginFor,
 collection: T::CollectionId,
 owner: AccountIdLookupOf,
 issuer: AccountIdLookupOf,
 admin: AccountIdLookupOf,
 freezer: AccountIdLookupOf,
- free_holding: bool,
- is_frozen: bool,
+ config: CollectionConfig,
 ) -> DispatchResult {
 T::ForceOrigin::ensure_origin(origin)?;
- Collection::::try_mutate(collection, |maybe_item| {
- let mut item = maybe_item.take().ok_or(Error::::UnknownCollection)?;
- let old_owner = item.owner;
+ Collection::::try_mutate(collection, |maybe_collection| {
+ let mut collection_info =
+ maybe_collection.take().ok_or(Error::::UnknownCollection)?;
+ let old_owner = collection_info.owner;
 let new_owner = T::Lookup::lookup(owner)?;
- item.owner = new_owner.clone();
- item.issuer = T::Lookup::lookup(issuer)?;
- item.admin = T::Lookup::lookup(admin)?;
- item.freezer = T::Lookup::lookup(freezer)?;
- item.free_holding = free_holding;
- item.is_frozen = is_frozen;
- *maybe_item = Some(item);
+ collection_info.owner = new_owner.clone();
+ collection_info.issuer = T::Lookup::lookup(issuer)?;
+ collection_info.admin = T::Lookup::lookup(admin)?;
+ collection_info.freezer = T::Lookup::lookup(freezer)?;
+ *maybe_collection = Some(collection_info);
 CollectionAccount::::remove(&old_owner, &collection);
 CollectionAccount::::insert(&new_owner, &collection, ());
+ CollectionConfigOf::::insert(&collection, config);
- Self::deposit_event(Event::ItemStatusChanged { collection });
+ Self::deposit_event(Event::CollectionStatusChanged { collection });
 Ok(())
 })
 }
+ /// Disallows changing the metadata or attributes of the item.
+ ///
+ /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the
+ /// `collection`.
+ ///
+ /// - `collection`: The collection of the `item`.
+ /// - `item`: An item to be locked. 
+ /// - `lock_config`: The config with the settings to be locked.
+ ///
+ /// Note: when the metadata or attributes are locked, it won't be possible to unlock them.
+ /// Emits `ItemPropertiesLocked`.
+ ///
+ /// Weight: `O(1)`
+ #[pallet::weight(T::WeightInfo::lock_item_properties())]
+ pub fn lock_item_properties(
+ origin: OriginFor,
+ collection: T::CollectionId,
+ item: T::ItemId,
+ lock_metadata: bool,
+ lock_attributes: bool,
+ ) -> DispatchResult {
+ let maybe_check_owner = T::ForceOrigin::try_origin(origin)
+ .map(|_| None)
+ .or_else(|origin| ensure_signed(origin).map(Some))?;
+
+ Self::do_lock_item_properties(
+ maybe_check_owner,
+ collection,
+ item,
+ lock_metadata,
+ lock_attributes,
+ )
+ }
+
 /// Set an attribute for a collection or item.
 ///
 /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the
@@ -1262,20 +1272,35 @@
 key: BoundedVec,
 value: BoundedVec,
 ) -> DispatchResult {
+ ensure!(
+ Self::is_pallet_feature_enabled(PalletFeature::Attributes),
+ Error::::MethodDisabled
+ );
 let maybe_check_owner = T::ForceOrigin::try_origin(origin)
 .map(|_| None)
 .or_else(|origin| ensure_signed(origin).map(Some))?;
 let mut collection_details =
 Collection::::get(&collection).ok_or(Error::::UnknownCollection)?;
+
 if let Some(check_owner) = &maybe_check_owner {
 ensure!(check_owner == &collection_details.owner, Error::::NoPermission);
 }
- let maybe_is_frozen = match maybe_item {
- None => CollectionMetadataOf::::get(collection).map(|v| v.is_frozen),
- Some(item) => ItemMetadataOf::::get(collection, item).map(|v| v.is_frozen),
+
+ let collection_config = Self::get_collection_config(&collection)?;
+ match maybe_item {
+ None => {
+ ensure!(
+ collection_config.is_setting_enabled(CollectionSetting::UnlockedAttributes),
+ Error::::LockedCollectionAttributes
+ )
+ },
+ Some(item) => {
+ let maybe_is_locked = Self::get_item_config(&collection, &item)
+ .map(|c| 
c.has_disabled_setting(ItemSetting::UnlockedAttributes))?; + ensure!(!maybe_is_locked, Error::::LockedItemAttributes); + }, }; - ensure!(!maybe_is_frozen.unwrap_or(false), Error::::Frozen); let attribute = Attribute::::get((collection, maybe_item, &key)); if attribute.is_none() { @@ -1284,7 +1309,9 @@ pub mod pallet { let old_deposit = attribute.map_or(Zero::zero(), |m| m.1); collection_details.total_deposit.saturating_reduce(old_deposit); let mut deposit = Zero::zero(); - if !collection_details.free_holding && maybe_check_owner.is_some() { + if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) && + maybe_check_owner.is_some() + { deposit = T::DepositPerByte::get() .saturating_mul(((key.len() + value.len()) as u32).into()) .saturating_add(T::AttributeDepositBase::get()); @@ -1332,11 +1359,28 @@ pub mod pallet { if let Some(check_owner) = &maybe_check_owner { ensure!(check_owner == &collection_details.owner, Error::::NoPermission); } - let maybe_is_frozen = match maybe_item { - None => CollectionMetadataOf::::get(collection).map(|v| v.is_frozen), - Some(item) => ItemMetadataOf::::get(collection, item).map(|v| v.is_frozen), - }; - ensure!(!maybe_is_frozen.unwrap_or(false), Error::::Frozen); + + if maybe_check_owner.is_some() { + match maybe_item { + None => { + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config + .is_setting_enabled(CollectionSetting::UnlockedAttributes), + Error::::LockedCollectionAttributes + ) + }, + Some(item) => { + // NOTE: if the item was previously burned, the ItemSettings record might + // not exists. In that case, we allow to clear the attribute. 
+ let maybe_is_locked = Self::get_item_config(&collection, &item) + .map_or(false, |c| { + c.has_disabled_setting(ItemSetting::UnlockedAttributes) + }); + ensure!(!maybe_is_locked, Error::::LockedItemAttributes); + }, + }; + } if let Some((_, deposit)) = Attribute::::take((collection, maybe_item, &key)) { collection_details.attributes.saturating_dec(); @@ -1360,7 +1404,6 @@ pub mod pallet { /// - `collection`: The identifier of the collection whose item's metadata to set. /// - `item`: The identifier of the item whose metadata to set. /// - `data`: The general information of this item. Limited in length by `StringLimit`. - /// - `is_frozen`: Whether the metadata should be frozen against further changes. /// /// Emits `MetadataSet`. /// @@ -1371,7 +1414,6 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, data: BoundedVec, - is_frozen: bool, ) -> DispatchResult { let maybe_check_owner = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1380,21 +1422,29 @@ pub mod pallet { let mut collection_details = Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + let item_config = Self::get_item_config(&collection, &item)?; + ensure!( + maybe_check_owner.is_none() || + item_config.is_setting_enabled(ItemSetting::UnlockedMetadata), + Error::::LockedItemMetadata + ); + if let Some(check_owner) = &maybe_check_owner { ensure!(check_owner == &collection_details.owner, Error::::NoPermission); } - ItemMetadataOf::::try_mutate_exists(collection, item, |metadata| { - let was_frozen = metadata.as_ref().map_or(false, |m| m.is_frozen); - ensure!(maybe_check_owner.is_none() || !was_frozen, Error::::Frozen); + let collection_config = Self::get_collection_config(&collection)?; + ItemMetadataOf::::try_mutate_exists(collection, item, |metadata| { if metadata.is_none() { collection_details.item_metadatas.saturating_inc(); } let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); collection_details.total_deposit.saturating_reduce(old_deposit); 
let mut deposit = Zero::zero(); - if !collection_details.free_holding && maybe_check_owner.is_some() { + if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) && + maybe_check_owner.is_some() + { deposit = T::DepositPerByte::get() .saturating_mul(((data.len()) as u32).into()) .saturating_add(T::MetadataDepositBase::get()); @@ -1406,10 +1456,10 @@ pub mod pallet { } collection_details.total_deposit.saturating_accrue(deposit); - *metadata = Some(ItemMetadata { deposit, data: data.clone(), is_frozen }); + *metadata = Some(ItemMetadata { deposit, data: data.clone() }); Collection::::insert(&collection, &collection_details); - Self::deposit_event(Event::MetadataSet { collection, item, data, is_frozen }); + Self::deposit_event(Event::MetadataSet { collection, item, data }); Ok(()) }) } @@ -1417,7 +1467,7 @@ pub mod pallet { /// Clear the metadata for an item. /// /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the - /// `item`. + /// `collection`. /// /// Any deposit is freed for the collection's owner. 
/// @@ -1443,14 +1493,17 @@ pub mod pallet { ensure!(check_owner == &collection_details.owner, Error::::NoPermission); } - ItemMetadataOf::::try_mutate_exists(collection, item, |metadata| { - let was_frozen = metadata.as_ref().map_or(false, |m| m.is_frozen); - ensure!(maybe_check_owner.is_none() || !was_frozen, Error::::Frozen); + // NOTE: if the item was previously burned, the ItemSettings record might not exists + let is_locked = Self::get_item_config(&collection, &item) + .map_or(false, |c| c.has_disabled_setting(ItemSetting::UnlockedMetadata)); + + ensure!(maybe_check_owner.is_none() || !is_locked, Error::::LockedItemMetadata); + ItemMetadataOf::::try_mutate_exists(collection, item, |metadata| { if metadata.is_some() { collection_details.item_metadatas.saturating_dec(); } - let deposit = metadata.take().ok_or(Error::::UnknownCollection)?.deposit; + let deposit = metadata.take().ok_or(Error::::UnknownItem)?.deposit; T::Currency::unreserve(&collection_details.owner, deposit); collection_details.total_deposit.saturating_reduce(deposit); @@ -1471,7 +1524,6 @@ pub mod pallet { /// /// - `collection`: The identifier of the item whose metadata to update. /// - `data`: The general information of this item. Limited in length by `StringLimit`. - /// - `is_frozen`: Whether the metadata should be frozen against further changes. /// /// Emits `CollectionMetadataSet`. 
/// @@ -1481,12 +1533,18 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, data: BoundedVec, - is_frozen: bool, ) -> DispatchResult { let maybe_check_owner = T::ForceOrigin::try_origin(origin) .map(|_| None) .or_else(|origin| ensure_signed(origin).map(Some))?; + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + maybe_check_owner.is_none() || + collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), + Error::::LockedCollectionMetadata + ); + let mut details = Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; if let Some(check_owner) = &maybe_check_owner { @@ -1494,13 +1552,12 @@ pub mod pallet { } CollectionMetadataOf::::try_mutate_exists(collection, |metadata| { - let was_frozen = metadata.as_ref().map_or(false, |m| m.is_frozen); - ensure!(maybe_check_owner.is_none() || !was_frozen, Error::::Frozen); - let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); details.total_deposit.saturating_reduce(old_deposit); let mut deposit = Zero::zero(); - if maybe_check_owner.is_some() && !details.free_holding { + if maybe_check_owner.is_some() && + collection_config.is_setting_enabled(CollectionSetting::DepositRequired) + { deposit = T::DepositPerByte::get() .saturating_mul(((data.len()) as u32).into()) .saturating_add(T::MetadataDepositBase::get()); @@ -1514,9 +1571,9 @@ pub mod pallet { Collection::::insert(&collection, details); - *metadata = Some(CollectionMetadata { deposit, data: data.clone(), is_frozen }); + *metadata = Some(CollectionMetadata { deposit, data: data.clone() }); - Self::deposit_event(Event::CollectionMetadataSet { collection, data, is_frozen }); + Self::deposit_event(Event::CollectionMetadataSet { collection, data }); Ok(()) }) } @@ -1548,10 +1605,14 @@ pub mod pallet { ensure!(check_owner == &details.owner, Error::::NoPermission); } - CollectionMetadataOf::::try_mutate_exists(collection, |metadata| { - let was_frozen = metadata.as_ref().map_or(false, |m| 
m.is_frozen); - ensure!(maybe_check_owner.is_none() || !was_frozen, Error::::Frozen); + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + maybe_check_owner.is_none() || + collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), + Error::::LockedCollectionMetadata + ); + CollectionMetadataOf::::try_mutate_exists(collection, |metadata| { let deposit = metadata.take().ok_or(Error::::UnknownCollection)?.deposit; T::Currency::unreserve(&details.owner, deposit); Self::deposit_event(Event::CollectionMetadataCleared { collection }); diff --git a/frame/nfts/src/macros.rs b/frame/nfts/src/macros.rs new file mode 100644 index 0000000000000..07a8f3b9f9556 --- /dev/null +++ b/frame/nfts/src/macros.rs @@ -0,0 +1,74 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +macro_rules! impl_incrementable { + ($($type:ty),+) => { + $( + impl Incrementable for $type { + fn increment(&self) -> Self { + let mut val = self.clone(); + val.saturating_inc(); + val + } + + fn initial_value() -> Self { + 0 + } + } + )+ + }; +} +pub(crate) use impl_incrementable; + +macro_rules! 
impl_codec_bitflags { + ($wrapper:ty, $size:ty, $bitflag_enum:ty) => { + impl MaxEncodedLen for $wrapper { + fn max_encoded_len() -> usize { + <$size>::max_encoded_len() + } + } + impl Encode for $wrapper { + fn using_encoded R>(&self, f: F) -> R { + self.0.bits().using_encoded(f) + } + } + impl EncodeLike for $wrapper {} + impl Decode for $wrapper { + fn decode( + input: &mut I, + ) -> sp_std::result::Result { + let field = <$size>::decode(input)?; + Ok(Self(BitFlags::from_bits(field as $size).map_err(|_| "invalid value")?)) + } + } + + impl TypeInfo for $wrapper { + type Identity = Self; + + fn type_info() -> Type { + Type::builder() + .path(Path::new("BitFlags", module_path!())) + .type_params(vec![TypeParameter::new("T", Some(meta_type::<$bitflag_enum>()))]) + .composite( + Fields::unnamed() + .field(|f| f.ty::<$size>().type_name(stringify!($bitflag_enum))), + ) + } + } + }; +} +pub(crate) use impl_codec_bitflags; diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs index 23493829eaca7..bbd1625710500 100644 --- a/frame/nfts/src/mock.rs +++ b/frame/nfts/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_nfts; use frame_support::{ - construct_runtime, + construct_runtime, parameter_types, traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, }; use sp_core::H256; @@ -84,6 +84,10 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; } +parameter_types! 
{ + pub storage Features: PalletFeatures = PalletFeatures::all_enabled(); +} + impl Config for Test { type RuntimeEvent = RuntimeEvent; type CollectionId = u32; @@ -103,6 +107,7 @@ impl Config for Test { type ApprovalsLimit = ConstU32<10>; type MaxTips = ConstU32<10>; type MaxDeadlineDuration = ConstU64<10000>; + type Features = Features; type WeightInfo = (); #[cfg(feature = "runtime-benchmarks")] type Helper = (); diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 0d2d0c661b273..1b60fd6431b19 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -92,6 +92,14 @@ fn events() -> Vec> { result } +fn default_collection_config() -> CollectionConfig { + CollectionConfig::disable_settings(CollectionSetting::DepositRequired.into()) +} + +fn default_item_config() -> ItemConfig { + ItemConfig::all_settings_enabled() +} + #[test] fn basic_setup_works() { new_test_ext().execute_with(|| { @@ -102,14 +110,14 @@ fn basic_setup_works() { #[test] fn basic_minting_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); assert_eq!(collections(), vec![(1, 0)]); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); assert_eq!(items(), vec![(1, 0, 42)]); - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 2, true)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 2, default_collection_config())); assert_eq!(collections(), vec![(1, 0), (2, 1)]); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 1, 69, 1)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 1, 69, 1, default_item_config())); assert_eq!(items(), vec![(1, 0, 42), (1, 1, 69)]); }); } @@ -118,25 +126,29 @@ fn basic_minting_should_work() { fn lifecycle_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - 
assert_ok!(Nfts::create(RuntimeOrigin::signed(1), 1)); + assert_ok!(Nfts::create( + RuntimeOrigin::signed(1), + 1, + CollectionConfig::all_settings_enabled() + )); assert_eq!(Balances::reserved_balance(&1), 2); assert_eq!(collections(), vec![(1, 0)]); - assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0, 0], false)); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0, 0])); assert_eq!(Balances::reserved_balance(&1), 5); assert!(CollectionMetadataOf::::contains_key(0)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 10)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 10, default_item_config())); assert_eq!(Balances::reserved_balance(&1), 6); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 69, 20)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 69, 20, default_item_config())); assert_eq!(Balances::reserved_balance(&1), 7); assert_eq!(items(), vec![(10, 0, 42), (20, 0, 69)]); assert_eq!(Collection::::get(0).unwrap().items, 2); assert_eq!(Collection::::get(0).unwrap().item_metadatas, 0); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![42, 42], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![42, 42])); assert_eq!(Balances::reserved_balance(&1), 10); assert!(ItemMetadataOf::::contains_key(0, 42)); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![69, 69], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![69, 69])); assert_eq!(Balances::reserved_balance(&1), 13); assert!(ItemMetadataOf::::contains_key(0, 69)); @@ -147,6 +159,7 @@ fn lifecycle_should_work() { assert_eq!(Balances::reserved_balance(&1), 0); assert!(!Collection::::contains_key(0)); + assert!(!CollectionConfigOf::::contains_key(0)); assert!(!Item::::contains_key(0, 42)); assert!(!Item::::contains_key(0, 69)); assert!(!CollectionMetadataOf::::contains_key(0)); @@ -161,10 +174,14 @@ fn lifecycle_should_work() { fn 
destroy_with_bad_witness_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::create(RuntimeOrigin::signed(1), 1)); + assert_ok!(Nfts::create( + RuntimeOrigin::signed(1), + 1, + CollectionConfig::all_settings_enabled() + )); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); assert_noop!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w), Error::::BadWitness); }); } @@ -172,8 +189,8 @@ fn destroy_with_bad_witness_should_not_work() { #[test] fn mint_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); assert_eq!(Nfts::owner(0, 42).unwrap(), 1); assert_eq!(collections(), vec![(1, 0)]); assert_eq!(items(), vec![(1, 0, 42)]); @@ -183,8 +200,8 @@ fn mint_should_work() { #[test] fn transfer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); assert_ok!(Nfts::transfer(RuntimeOrigin::signed(2), 0, 42, 3)); assert_eq!(items(), vec![(3, 0, 42)]); @@ -195,22 +212,54 @@ fn transfer_should_work() { assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(3), 0, 42, 2, None)); assert_ok!(Nfts::transfer(RuntimeOrigin::signed(2), 0, 42, 4)); + + // validate we can't transfer non-transferable items + let collection_id = 1; + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + 
CollectionConfig::disable_settings( + CollectionSetting::TransferableItems | CollectionSetting::DepositRequired + ) + )); + + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 1, 1, 42, default_item_config())); + + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(1), collection_id, 42, 3,), + Error::::ItemsNotTransferable + ); }); } #[test] -fn freezing_should_work() { +fn locking_transfer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); - assert_ok!(Nfts::freeze(RuntimeOrigin::signed(1), 0, 42)); - assert_noop!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::Frozen); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); + assert_ok!(Nfts::lock_item_transfer(RuntimeOrigin::signed(1), 0, 42)); + assert_noop!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::ItemLocked); - assert_ok!(Nfts::thaw(RuntimeOrigin::signed(1), 0, 42)); - assert_ok!(Nfts::freeze_collection(RuntimeOrigin::signed(1), 0)); - assert_noop!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::Frozen); + assert_ok!(Nfts::unlock_item_transfer(RuntimeOrigin::signed(1), 0, 42)); + assert_ok!(Nfts::lock_collection( + RuntimeOrigin::signed(1), + 0, + CollectionConfig::disable_settings(CollectionSetting::TransferableItems.into()) + )); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2), + Error::::ItemsNotTransferable + ); - assert_ok!(Nfts::thaw_collection(RuntimeOrigin::signed(1), 0)); + assert_ok!(Nfts::force_collection_status( + RuntimeOrigin::root(), + 0, + 1, + 1, + 1, + 1, + CollectionConfig::all_settings_enabled(), + )); assert_ok!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2)); }); } @@ -218,8 +267,8 @@ fn freezing_should_work() { #[test] fn origin_guards_should_work() { 
new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); Balances::make_free_balance_be(&2, 100); assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(2), Some(0))); @@ -231,9 +280,18 @@ fn origin_guards_should_work() { Nfts::set_team(RuntimeOrigin::signed(2), 0, 2, 2, 2), Error::::NoPermission ); - assert_noop!(Nfts::freeze(RuntimeOrigin::signed(2), 0, 42), Error::::NoPermission); - assert_noop!(Nfts::thaw(RuntimeOrigin::signed(2), 0, 42), Error::::NoPermission); - assert_noop!(Nfts::mint(RuntimeOrigin::signed(2), 0, 69, 2), Error::::NoPermission); + assert_noop!( + Nfts::lock_item_transfer(RuntimeOrigin::signed(2), 0, 42), + Error::::NoPermission + ); + assert_noop!( + Nfts::unlock_item_transfer(RuntimeOrigin::signed(2), 0, 42), + Error::::NoPermission + ); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(2), 0, 69, 2, default_item_config()), + Error::::NoPermission + ); assert_noop!( Nfts::burn(RuntimeOrigin::signed(2), 0, 42, None), Error::::NoPermission @@ -249,7 +307,11 @@ fn transfer_owner_should_work() { Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); Balances::make_free_balance_be(&3, 100); - assert_ok!(Nfts::create(RuntimeOrigin::signed(1), 1)); + assert_ok!(Nfts::create( + RuntimeOrigin::signed(1), + 1, + CollectionConfig::all_settings_enabled() + )); assert_eq!(collections(), vec![(1, 0)]); assert_noop!( Nfts::transfer_ownership(RuntimeOrigin::signed(1), 0, 2), @@ -271,14 +333,9 @@ fn transfer_owner_should_work() { ); // Mint and set metadata now and make sure that deposit gets transferred back. 
- assert_ok!(Nfts::set_collection_metadata( - RuntimeOrigin::signed(2), - 0, - bvec![0u8; 20], - false - )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20], false)); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(2), 0, bvec![0u8; 20])); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20])); assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(3), Some(0))); assert_ok!(Nfts::transfer_ownership(RuntimeOrigin::signed(2), 0, 3)); assert_eq!(collections(), vec![(3, 0)]); @@ -299,12 +356,12 @@ fn transfer_owner_should_work() { #[test] fn set_team_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); assert_ok!(Nfts::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 42, 2)); - assert_ok!(Nfts::freeze(RuntimeOrigin::signed(4), 0, 42)); - assert_ok!(Nfts::thaw(RuntimeOrigin::signed(3), 0, 42)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 42, 2, default_item_config())); + assert_ok!(Nfts::lock_item_transfer(RuntimeOrigin::signed(4), 0, 42)); + assert_ok!(Nfts::unlock_item_transfer(RuntimeOrigin::signed(4), 0, 42)); assert_ok!(Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 3)); assert_ok!(Nfts::burn(RuntimeOrigin::signed(3), 0, 42, None)); }); @@ -315,70 +372,59 @@ fn set_collection_metadata_should_work() { new_test_ext().execute_with(|| { // Cannot add metadata to unknown item assert_noop!( - Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 20], false), + Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 20]), Error::::UnknownCollection, ); - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, 
false)); + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + CollectionConfig::all_settings_enabled() + )); // Cannot add metadata to unowned item assert_noop!( - Nfts::set_collection_metadata(RuntimeOrigin::signed(2), 0, bvec![0u8; 20], false), + Nfts::set_collection_metadata(RuntimeOrigin::signed(2), 0, bvec![0u8; 20]), Error::::NoPermission, ); // Successfully add metadata and take deposit Balances::make_free_balance_be(&1, 30); - assert_ok!(Nfts::set_collection_metadata( - RuntimeOrigin::signed(1), - 0, - bvec![0u8; 20], - false - )); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 20])); assert_eq!(Balances::free_balance(&1), 9); assert!(CollectionMetadataOf::::contains_key(0)); // Force origin works, too. - assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::root(), 0, bvec![0u8; 18], false)); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::root(), 0, bvec![0u8; 18])); // Update deposit - assert_ok!(Nfts::set_collection_metadata( - RuntimeOrigin::signed(1), - 0, - bvec![0u8; 15], - false - )); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 15])); assert_eq!(Balances::free_balance(&1), 14); - assert_ok!(Nfts::set_collection_metadata( - RuntimeOrigin::signed(1), - 0, - bvec![0u8; 25], - false - )); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 25])); assert_eq!(Balances::free_balance(&1), 4); // Cannot over-reserve assert_noop!( - Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 40], false), + Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 40]), BalancesError::::InsufficientBalance, ); // Can't set or clear metadata once frozen - assert_ok!(Nfts::set_collection_metadata( + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 15])); + assert_ok!(Nfts::lock_collection( RuntimeOrigin::signed(1), 0, - bvec![0u8; 15], - true + 
CollectionConfig::disable_settings(CollectionSetting::UnlockedMetadata.into()) )); assert_noop!( - Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 15], false), - Error::::Frozen, + Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 15]), + Error::::LockedCollectionMetadata, ); assert_noop!( Nfts::clear_collection_metadata(RuntimeOrigin::signed(1), 0), - Error::::Frozen + Error::::LockedCollectionMetadata ); // Clear Metadata - assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::root(), 0, bvec![0u8; 15], false)); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::root(), 0, bvec![0u8; 15])); assert_noop!( Nfts::clear_collection_metadata(RuntimeOrigin::signed(2), 0), Error::::NoPermission @@ -387,7 +433,11 @@ fn set_collection_metadata_should_work() { Nfts::clear_collection_metadata(RuntimeOrigin::signed(1), 1), Error::::UnknownCollection ); - assert_ok!(Nfts::clear_collection_metadata(RuntimeOrigin::signed(1), 0)); + assert_noop!( + Nfts::clear_collection_metadata(RuntimeOrigin::signed(1), 0), + Error::::LockedCollectionMetadata + ); + assert_ok!(Nfts::clear_collection_metadata(RuntimeOrigin::root(), 0)); assert!(!CollectionMetadataOf::::contains_key(0)); }); } @@ -398,53 +448,61 @@ fn set_item_metadata_should_work() { Balances::make_free_balance_be(&1, 30); // Cannot add metadata to unknown item - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, false)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + CollectionConfig::all_settings_enabled() + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); // Cannot add metadata to unowned item assert_noop!( - Nfts::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20], false), + Nfts::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20]), Error::::NoPermission, ); // Successfully add metadata and take deposit - 
assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 20], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 20])); assert_eq!(Balances::free_balance(&1), 8); assert!(ItemMetadataOf::::contains_key(0, 42)); // Force origin works, too. - assert_ok!(Nfts::set_metadata(RuntimeOrigin::root(), 0, 42, bvec![0u8; 18], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::root(), 0, 42, bvec![0u8; 18])); // Update deposit - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15])); assert_eq!(Balances::free_balance(&1), 13); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 25], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 25])); assert_eq!(Balances::free_balance(&1), 3); // Cannot over-reserve assert_noop!( - Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 40], false), + Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 40]), BalancesError::::InsufficientBalance, ); // Can't set or clear metadata once frozen - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15], true)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15])); + assert_ok!(Nfts::lock_item_properties(RuntimeOrigin::signed(1), 0, 42, true, false)); + assert_noop!( + Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15]), + Error::::LockedItemMetadata, + ); assert_noop!( - Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15], false), - Error::::Frozen, + Nfts::clear_metadata(RuntimeOrigin::signed(1), 0, 42), + Error::::LockedItemMetadata, ); - assert_noop!(Nfts::clear_metadata(RuntimeOrigin::signed(1), 0, 42), Error::::Frozen); // Clear Metadata - assert_ok!(Nfts::set_metadata(RuntimeOrigin::root(), 0, 42, bvec![0u8; 15], false)); + 
assert_ok!(Nfts::set_metadata(RuntimeOrigin::root(), 0, 42, bvec![0u8; 15])); assert_noop!( Nfts::clear_metadata(RuntimeOrigin::signed(2), 0, 42), - Error::::NoPermission + Error::::NoPermission, ); assert_noop!( Nfts::clear_metadata(RuntimeOrigin::signed(1), 1, 42), - Error::::UnknownCollection + Error::::UnknownCollection, ); - assert_ok!(Nfts::clear_metadata(RuntimeOrigin::signed(1), 0, 42)); + assert_ok!(Nfts::clear_metadata(RuntimeOrigin::root(), 0, 42)); assert!(!ItemMetadataOf::::contains_key(0, 42)); }); } @@ -454,7 +512,12 @@ fn set_attribute_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, false)); + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + CollectionConfig::all_settings_enabled() + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, default_item_config())); assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0])); assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![0])); @@ -467,7 +530,7 @@ fn set_attribute_should_work() { (Some(0), bvec![1], bvec![0]), ] ); - assert_eq!(Balances::reserved_balance(1), 9); + assert_eq!(Balances::reserved_balance(1), 10); assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0; 10])); assert_eq!( @@ -478,14 +541,14 @@ fn set_attribute_should_work() { (Some(0), bvec![1], bvec![0]), ] ); - assert_eq!(Balances::reserved_balance(1), 18); + assert_eq!(Balances::reserved_balance(1), 19); assert_ok!(Nfts::clear_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![1])); assert_eq!( attributes(0), vec![(None, bvec![0], bvec![0; 10]), (Some(0), bvec![0], bvec![0]),] ); - assert_eq!(Balances::reserved_balance(1), 15); + assert_eq!(Balances::reserved_balance(1), 16); let w = Collection::::get(0).unwrap().destroy_witness(); assert_ok!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w)); @@ -495,11 +558,17 @@ fn 
set_attribute_should_work() { } #[test] -fn set_attribute_should_respect_freeze() { +fn set_attribute_should_respect_lock() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, false)); + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + CollectionConfig::all_settings_enabled() + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, 1, default_item_config())); assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0])); assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![0])); @@ -512,15 +581,21 @@ fn set_attribute_should_respect_freeze() { (Some(1), bvec![0], bvec![0]), ] ); - assert_eq!(Balances::reserved_balance(1), 9); + assert_eq!(Balances::reserved_balance(1), 11); - assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![], true)); - let e = Error::::Frozen; + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![])); + assert_ok!(Nfts::lock_collection( + RuntimeOrigin::signed(1), + 0, + CollectionConfig::disable_settings(CollectionSetting::UnlockedAttributes.into()) + )); + + let e = Error::::LockedCollectionAttributes; assert_noop!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0]), e); assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![1])); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 0, bvec![], true)); - let e = Error::::Frozen; + assert_ok!(Nfts::lock_item_properties(RuntimeOrigin::signed(1), 0, 0, false, true)); + let e = Error::::LockedItemAttributes; assert_noop!( Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![1]), e @@ -530,36 +605,87 @@ fn set_attribute_should_respect_freeze() { } #[test] -fn force_item_status_should_work() { +fn preserve_config_for_frozen_items() { 
+ new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + CollectionConfig::all_settings_enabled() + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, 1, default_item_config())); + + // if the item is not locked/frozen then the config gets deleted on item burn + assert_ok!(Nfts::burn(RuntimeOrigin::signed(1), 0, 1, Some(1))); + assert!(!ItemConfigOf::::contains_key(0, 1)); + + // lock the item and ensure the config stays unchanged + assert_ok!(Nfts::lock_item_properties(RuntimeOrigin::signed(1), 0, 0, true, true)); + + let expect_config = + ItemConfig(ItemSetting::UnlockedAttributes | ItemSetting::UnlockedMetadata); + let config = ItemConfigOf::::get(0, 0).unwrap(); + assert_eq!(config, expect_config); + + assert_ok!(Nfts::burn(RuntimeOrigin::signed(1), 0, 0, Some(1))); + let config = ItemConfigOf::::get(0, 0).unwrap(); + assert_eq!(config, expect_config); + + // can't mint with the different config + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, default_item_config()), + Error::::InconsistentItemConfig + ); + + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, expect_config)); + }); +} + +#[test] +fn force_collection_status_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, false)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 69, 2)); - assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0; 20], false)); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20], false)); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 20], false)); + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + 
CollectionConfig::all_settings_enabled() + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 69, 2, default_item_config())); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0; 20])); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20])); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 20])); assert_eq!(Balances::reserved_balance(1), 65); // force item status to be free holding - assert_ok!(Nfts::force_item_status(RuntimeOrigin::root(), 0, 1, 1, 1, 1, true, false)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 142, 1)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 169, 2)); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 142, bvec![0; 20], false)); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 169, bvec![0; 20], false)); + assert_ok!(Nfts::force_collection_status( + RuntimeOrigin::root(), + 0, + 1, + 1, + 1, + 1, + CollectionConfig::disable_settings(CollectionSetting::DepositRequired.into()), + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 142, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 169, 2, default_item_config())); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 142, bvec![0; 20])); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 169, bvec![0; 20])); assert_eq!(Balances::reserved_balance(1), 65); assert_ok!(Nfts::redeposit(RuntimeOrigin::signed(1), 0, bvec![0, 42, 50, 69, 100])); assert_eq!(Balances::reserved_balance(1), 63); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20], false)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20])); assert_eq!(Balances::reserved_balance(1), 42); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 20], false)); + 
assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 20])); assert_eq!(Balances::reserved_balance(1), 21); - assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0; 20], false)); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0; 20])); assert_eq!(Balances::reserved_balance(1), 0); }); } @@ -568,7 +694,11 @@ fn force_item_status_should_work() { fn burn_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, false)); + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + CollectionConfig::all_settings_enabled() + )); assert_ok!(Nfts::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); assert_noop!( @@ -576,8 +706,8 @@ fn burn_works() { Error::::UnknownCollection ); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 42, 5)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 69, 5)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 42, 5, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 69, 5, default_item_config())); assert_eq!(Balances::reserved_balance(1), 2); assert_noop!( @@ -598,8 +728,8 @@ fn burn_works() { #[test] fn approval_lifecycle_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_ok!(Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 4)); assert_noop!( @@ -610,14 +740,37 @@ fn approval_lifecycle_works() { assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(4), 0, 42, 2, None)); assert_ok!(Nfts::transfer(RuntimeOrigin::signed(2), 0, 42, 2)); + + // ensure we can't buy an item when 
the collection has a NonTransferableItems flag + let collection_id = 1; + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + CollectionConfig::disable_settings( + CollectionSetting::TransferableItems | CollectionSetting::DepositRequired + ) + )); + + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(1), + 1, + collection_id, + 1, + default_item_config() + )); + + assert_noop!( + Nfts::approve_transfer(RuntimeOrigin::signed(1), collection_id, 1, 2, None), + Error::::ItemsNotTransferable + ); }); } #[test] fn cancel_approval_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_noop!( @@ -645,7 +798,7 @@ fn cancel_approval_works() { let current_block = 1; System::set_block_number(current_block); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 69, 2)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 69, 2, default_item_config())); // approval expires after 2 blocks. 
assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, Some(2))); assert_noop!( @@ -663,8 +816,8 @@ fn cancel_approval_works() { #[test] fn approving_multiple_accounts_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); let current_block = 1; System::set_block_number(current_block); @@ -688,8 +841,8 @@ fn approving_multiple_accounts_works() { #[test] fn approvals_limit_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); for i in 3..13 { assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, i, None)); @@ -708,8 +861,12 @@ fn approval_deadline_works() { System::set_block_number(0); assert!(System::block_number().is_zero()); - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + CollectionConfig::disable_settings(CollectionSetting::DepositRequired.into()) + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); // the approval expires after the 2nd block. 
assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, Some(2))); @@ -735,8 +892,8 @@ fn approval_deadline_works() { #[test] fn cancel_approval_works_with_admin() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_noop!( @@ -763,8 +920,8 @@ fn cancel_approval_works_with_admin() { #[test] fn cancel_approval_works_with_force() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_noop!( @@ -791,8 +948,8 @@ fn cancel_approval_works_with_force() { #[test] fn clear_all_transfer_approvals_works() { new_test_ext().execute_with(|| { - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, true)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 4, None)); @@ -830,7 +987,7 @@ fn max_supply_should_work() { let max_supply = 2; // validate set_collection_max_supply - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, true)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, 
default_collection_config())); assert!(!CollectionMaxSupply::::contains_key(collection_id)); assert_ok!(Nfts::set_collection_max_supply( @@ -855,10 +1012,28 @@ fn max_supply_should_work() { ); // validate we can't mint more to max supply - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 0, user_id)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 1, user_id)); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + 0, + user_id, + default_item_config() + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + 1, + user_id, + default_item_config() + )); assert_noop!( - Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 2, user_id), + Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + 2, + user_id, + default_item_config() + ), Error::::MaxSupplyReached ); @@ -880,10 +1055,22 @@ fn set_price_should_work() { let item_1 = 1; let item_2 = 2; - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, true)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_1, user_id)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_2, user_id)); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + user_id, + default_item_config() + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_2, + user_id, + default_item_config() + )); assert_ok!(Nfts::set_price( RuntimeOrigin::signed(user_id), @@ -929,6 +1116,29 @@ fn set_price_should_work() { item: item_2 })); assert!(!ItemPriceOf::::contains_key(collection_id, item_2)); + + // ensure we can't set price when the items are non-transferable + let collection_id = 1; + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + user_id, + CollectionConfig::disable_settings( + CollectionSetting::TransferableItems | 
CollectionSetting::DepositRequired + ) + )); + + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + user_id, + default_item_config() + )); + + assert_noop!( + Nfts::set_price(RuntimeOrigin::signed(user_id), collection_id, item_1, Some(2), None), + Error::::ItemsNotTransferable + ); }); } @@ -950,11 +1160,29 @@ fn buy_item_should_work() { Balances::make_free_balance_be(&user_2, initial_balance); Balances::make_free_balance_be(&user_3, initial_balance); - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, true)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_1, user_1)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_2, user_1)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_3, user_1)); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_1), + collection_id, + item_1, + user_1, + default_item_config(), + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_1), + collection_id, + item_2, + user_1, + default_item_config(), + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_1), + collection_id, + item_3, + user_1, + default_item_config(), + )); assert_ok!(Nfts::set_price( RuntimeOrigin::signed(user_1), @@ -1024,7 +1252,7 @@ fn buy_item_should_work() { Error::::NotForSale ); - // ensure we can't buy an item when the collection or an item is frozen + // ensure we can't buy an item when the collection or an item are frozen { assert_ok!(Nfts::set_price( RuntimeOrigin::signed(user_1), @@ -1034,8 +1262,12 @@ fn buy_item_should_work() { None, )); - // freeze collection - assert_ok!(Nfts::freeze_collection(RuntimeOrigin::signed(user_1), collection_id)); + // lock the collection + assert_ok!(Nfts::lock_collection( + RuntimeOrigin::signed(user_1), + collection_id, + 
CollectionConfig::disable_settings(CollectionSetting::TransferableItems.into()) + )); let buy_item_call = mock::RuntimeCall::Nfts(crate::Call::::buy_item { collection: collection_id, @@ -1044,13 +1276,26 @@ fn buy_item_should_work() { }); assert_noop!( buy_item_call.dispatch(RuntimeOrigin::signed(user_2)), - Error::::Frozen + Error::::ItemsNotTransferable ); - assert_ok!(Nfts::thaw_collection(RuntimeOrigin::signed(user_1), collection_id)); + // unlock the collection + assert_ok!(Nfts::force_collection_status( + RuntimeOrigin::root(), + collection_id, + user_1, + user_1, + user_1, + user_1, + CollectionConfig::all_settings_enabled(), + )); - // freeze item - assert_ok!(Nfts::freeze(RuntimeOrigin::signed(user_1), collection_id, item_3)); + // lock the transfer + assert_ok!(Nfts::lock_item_transfer( + RuntimeOrigin::signed(user_1), + collection_id, + item_3 + )); let buy_item_call = mock::RuntimeCall::Nfts(crate::Call::::buy_item { collection: collection_id, @@ -1059,7 +1304,7 @@ fn buy_item_should_work() { }); assert_noop!( buy_item_call.dispatch(RuntimeOrigin::signed(user_2)), - Error::::Frozen + Error::::ItemLocked ); } }); @@ -1124,10 +1369,22 @@ fn create_cancel_swap_should_work() { let duration = 2; let expect_deadline = 3; - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, true)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_1, user_id)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_2, user_id)); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + user_id, + default_item_config(), + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_2, + user_id, + default_item_config(), + )); // validate desired item and the collection exists assert_noop!( @@ -1262,13 +1519,43 @@ fn claim_swap_should_work() { 
Balances::make_free_balance_be(&user_1, initial_balance); Balances::make_free_balance_be(&user_2, initial_balance); - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, true)); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_1, user_1)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_2, user_2)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_3, user_2)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_4, user_1)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_5, user_2)); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_1), + collection_id, + item_1, + user_1, + default_item_config(), + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_1), + collection_id, + item_2, + user_2, + default_item_config(), + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_1), + collection_id, + item_3, + user_2, + default_item_config(), + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_1), + collection_id, + item_4, + user_1, + default_item_config(), + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_1), + collection_id, + item_5, + user_2, + default_item_config(), + )); assert_ok!(Nfts::create_swap( RuntimeOrigin::signed(user_1), @@ -1418,3 +1705,122 @@ fn claim_swap_should_work() { assert_eq!(Balances::total_balance(&user_2), initial_balance + price); }); } + +#[test] +fn various_collection_settings() { + new_test_ext().execute_with(|| { + // when we set only one value it's required to call .into() on it + let config = + CollectionConfig::disable_settings(CollectionSetting::TransferableItems.into()); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, config)); + + let config = CollectionConfigOf::::get(0).unwrap(); + assert!(!config.is_setting_enabled(CollectionSetting::TransferableItems)); + 
assert!(config.is_setting_enabled(CollectionSetting::UnlockedMetadata)); + + // no need to call .into() for multiple values + let config = CollectionConfig::disable_settings( + CollectionSetting::UnlockedMetadata | CollectionSetting::TransferableItems, + ); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, config)); + + let config = CollectionConfigOf::::get(1).unwrap(); + assert!(!config.is_setting_enabled(CollectionSetting::TransferableItems)); + assert!(!config.is_setting_enabled(CollectionSetting::UnlockedMetadata)); + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + }); +} + +#[test] +fn collection_locking_should_work() { + new_test_ext().execute_with(|| { + let user_id = 1; + let collection_id = 0; + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + user_id, + CollectionConfig::all_settings_enabled() + )); + + // validate partial lock + let lock_config = CollectionConfig::disable_settings( + CollectionSetting::TransferableItems | CollectionSetting::UnlockedAttributes, + ); + assert_ok!(Nfts::lock_collection( + RuntimeOrigin::signed(user_id), + collection_id, + lock_config, + )); + + let stored_config = CollectionConfigOf::::get(collection_id).unwrap(); + assert_eq!(stored_config, lock_config); + + // validate full lock + let full_lock_config = CollectionConfig::disable_settings( + CollectionSetting::TransferableItems | + CollectionSetting::UnlockedMetadata | + CollectionSetting::UnlockedAttributes, + ); + assert_ok!(Nfts::lock_collection( + RuntimeOrigin::signed(user_id), + collection_id, + CollectionConfig::disable_settings(CollectionSetting::UnlockedMetadata.into()), + )); + + let stored_config = CollectionConfigOf::::get(collection_id).unwrap(); + assert_eq!(stored_config, full_lock_config); + }); +} + +#[test] +fn pallet_level_feature_flags_should_work() { + new_test_ext().execute_with(|| { + Features::set(&PalletFeatures::disable( + PalletFeature::Trading | PalletFeature::Approvals | 
PalletFeature::Attributes, + )); + + let user_id = 1; + let collection_id = 0; + let item_id = 1; + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); + + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_id, + user_id, + default_item_config(), + )); + + // PalletFeature::Trading + assert_noop!( + Nfts::set_price(RuntimeOrigin::signed(user_id), collection_id, item_id, Some(1), None), + Error::::MethodDisabled + ); + assert_noop!( + Nfts::buy_item(RuntimeOrigin::signed(user_id), collection_id, item_id, 1), + Error::::MethodDisabled + ); + + // PalletFeature::Approvals + assert_noop!( + Nfts::approve_transfer(RuntimeOrigin::signed(user_id), collection_id, item_id, 2, None), + Error::::MethodDisabled + ); + + // PalletFeature::Attributes + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(user_id), + collection_id, + None, + bvec![0], + bvec![0] + ), + Error::::MethodDisabled + ); + }) +} diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index 399de3c5dad1e..6ed57e4da25e5 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -18,16 +18,24 @@ //! Various basic types for use in the Nfts pallet. 
use super::*; +use crate::macros::*; +use codec::EncodeLike; +use enumflags2::{bitflags, BitFlags}; use frame_support::{ pallet_prelude::{BoundedVec, MaxEncodedLen}, traits::Get, }; -use scale_info::TypeInfo; +use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; pub(super) type DepositBalanceOf = <>::Currency as Currency<::AccountId>>::Balance; pub(super) type CollectionDetailsFor = CollectionDetails<::AccountId, DepositBalanceOf>; +pub(super) type ApprovalsOf = BoundedBTreeMap< + ::AccountId, + Option<::BlockNumber>, + >::ApprovalsLimit, +>; pub(super) type ItemDetailsFor = ItemDetails<::AccountId, DepositBalanceOf, ApprovalsOf>; pub(super) type BalanceOf = @@ -40,6 +48,12 @@ pub(super) type ItemTipOf = ItemTip< BalanceOf, >; +pub trait Incrementable { + fn increment(&self) -> Self; + fn initial_value() -> Self; +} +impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); + #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct CollectionDetails { /// Can change `owner`, `issuer`, `freezer` and `admin` accounts. @@ -53,16 +67,12 @@ pub struct CollectionDetails { /// The total balance deposited for the all storage associated with this collection. /// Used by `destroy`. pub(super) total_deposit: DepositBalance, - /// If `true`, then no deposit is needed to hold items of this collection. - pub(super) free_holding: bool, /// The total number of outstanding items of this collection. pub(super) items: u32, /// The total number of outstanding item metadata of this collection. pub(super) item_metadatas: u32, /// The total number of attributes for this collection. pub(super) attributes: u32, - /// Whether the collection is frozen for non-admin transfers. - pub(super) is_frozen: bool, } /// Witness data for the destroy transactions. @@ -96,8 +106,6 @@ pub struct ItemDetails { pub(super) owner: AccountId, /// The approved transferrer of this item, if one is set. 
pub(super) approvals: Approvals, - /// Whether the item can be transferred or not. - pub(super) is_frozen: bool, /// The amount held in the pallet's default account for this item. Free-hold items will have /// this as zero. pub(super) deposit: DepositBalance, @@ -115,8 +123,6 @@ pub struct CollectionMetadata> { /// will generally be either a JSON dump or the hash of some JSON which can be found on a /// hash-addressable global publication system such as IPFS. pub(super) data: BoundedVec, - /// Whether the collection's metadata may be changed by a non Force origin. - pub(super) is_frozen: bool, } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] @@ -131,8 +137,6 @@ pub struct ItemMetadata> { /// generally be either a JSON dump or the hash of some JSON which can be found on a /// hash-addressable global publication system such as IPFS. pub(super) data: BoundedVec, - /// Whether the item metadata may be changed by a non Force origin. - pub(super) is_frozen: bool, } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -172,3 +176,128 @@ pub struct PriceWithDirection { /// A direction (send or receive). pub(super) direction: PriceDirection, } + +/// Support for up to 64 user-enabled features on a collection. +#[bitflags] +#[repr(u64)] +#[derive(Copy, Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub enum CollectionSetting { + /// Items in this collection are transferable. + TransferableItems, + /// The metadata of this collection can be modified. + UnlockedMetadata, + /// Attributes of this collection can be modified. + UnlockedAttributes, + /// When this isn't set then the deposit is required to hold the items of this collection. + DepositRequired, +} +pub(super) type CollectionSettings = BitFlags; + +/// Wrapper type for `CollectionSettings` that implements `Codec`. 
+#[derive(Clone, Copy, PartialEq, Eq, Default, RuntimeDebug)] +pub struct CollectionConfig(pub CollectionSettings); + +impl CollectionConfig { + pub fn all_settings_enabled() -> Self { + Self(BitFlags::EMPTY) + } + pub fn get_disabled_settings(&self) -> CollectionSettings { + self.0 + } + pub fn is_setting_enabled(&self, setting: CollectionSetting) -> bool { + !self.get_disabled_settings().contains(setting) + } + pub fn has_disabled_setting(&self, setting: CollectionSetting) -> bool { + self.get_disabled_settings().contains(setting) + } + pub fn disable_settings(settings: CollectionSettings) -> Self { + Self(settings) + } + pub fn enable_setting(&mut self, setting: CollectionSetting) { + self.0.remove(setting); + } + pub fn disable_setting(&mut self, setting: CollectionSetting) { + self.0.insert(setting); + } +} +impl_codec_bitflags!(CollectionConfig, u64, CollectionSetting); + +/// Support for up to 64 user-enabled features on an item. +#[bitflags] +#[repr(u64)] +#[derive(Copy, Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub enum ItemSetting { + /// This item is transferable. + Transferable, + /// The metadata of this item can be modified. + UnlockedMetadata, + /// Attributes of this item can be modified. + UnlockedAttributes, +} +pub(super) type ItemSettings = BitFlags; + +/// Wrapper type for `ItemSettings` that implements `Codec`. 
+#[derive(Clone, Copy, PartialEq, Eq, Default, RuntimeDebug)] +pub struct ItemConfig(pub ItemSettings); + +impl ItemConfig { + pub fn all_settings_enabled() -> Self { + Self(BitFlags::EMPTY) + } + pub fn get_disabled_settings(&self) -> ItemSettings { + self.0 + } + pub fn is_setting_enabled(&self, setting: ItemSetting) -> bool { + !self.get_disabled_settings().contains(setting) + } + pub fn has_disabled_setting(&self, setting: ItemSetting) -> bool { + self.get_disabled_settings().contains(setting) + } + pub fn has_disabled_settings(&self) -> bool { + !self.get_disabled_settings().is_empty() + } + pub fn disable_settings(settings: ItemSettings) -> Self { + Self(settings) + } + pub fn enable_setting(&mut self, setting: ItemSetting) { + self.0.remove(setting); + } + pub fn disable_setting(&mut self, setting: ItemSetting) { + self.0.insert(setting); + } +} +impl_codec_bitflags!(ItemConfig, u64, ItemSetting); + +/// Support for up to 64 system-enabled features on a collection. +#[bitflags] +#[repr(u64)] +#[derive(Copy, Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub enum PalletFeature { + /// Enable/disable trading operations. + Trading, + /// Allow/disallow setting attributes. + Attributes, + /// Allow/disallow transfer approvals. + Approvals, + /// Allow/disallow atomic items swap. + Swaps, + /// Allow/disallow public mints. + PublicMints, +} + +/// Wrapper type for `BitFlags` that implements `Codec`. 
+#[derive(Default, RuntimeDebug)] +pub struct PalletFeatures(pub BitFlags); + +impl PalletFeatures { + pub fn all_enabled() -> Self { + Self(BitFlags::EMPTY) + } + pub fn disable(features: BitFlags) -> Self { + Self(features) + } + pub fn is_enabled(&self, feature: PalletFeature) -> bool { + !self.0.contains(feature) + } +} +impl_codec_bitflags!(PalletFeatures, u64, PalletFeature); diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index 9d62db0f8d85d..5f6ee43a09ffe 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_nfts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-30, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-10-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -53,13 +53,13 @@ pub trait WeightInfo { fn burn() -> Weight; fn transfer() -> Weight; fn redeposit(i: u32, ) -> Weight; - fn freeze() -> Weight; - fn thaw() -> Weight; - fn freeze_collection() -> Weight; - fn thaw_collection() -> Weight; + fn lock_item_transfer() -> Weight; + fn unlock_item_transfer() -> Weight; + fn lock_collection() -> Weight; fn transfer_ownership() -> Weight; fn set_team() -> Weight; - fn force_item_status() -> Weight; + fn force_collection_status() -> Weight; + fn lock_item_properties() -> Weight; fn set_attribute() -> Weight; fn clear_attribute() -> Weight; fn set_metadata() -> Weight; @@ -85,231 +85,256 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts NextCollectionId (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts CollectionConfigOf (r:0 w:1) fn create() -> Weight { Weight::from_ref_time(38_062_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) - 
.saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Nfts NextCollectionId (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts CollectionConfigOf (r:0 w:1) fn force_create() -> Weight { Weight::from_ref_time(25_917_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts Asset (r:1 w:0) // Storage: Nfts ClassAccount (r:0 w:1) - // Storage: Nfts Attribute (r:0 w:1000) // Storage: Nfts ClassMetadataOf (r:0 w:1) - // Storage: Nfts InstanceMetadataOf (r:0 w:1000) + // Storage: Nfts CollectionConfigOf (r:0 w:1) // Storage: Nfts CollectionMaxSupply (r:0 w:1) + // Storage: Nfts Attribute (r:0 w:20) + // Storage: Nfts InstanceMetadataOf (r:0 w:20) + // Storage: Nfts ItemConfigOf (r:0 w:20) // Storage: Nfts Account (r:0 w:20) /// The range of component `n` is `[0, 1000]`. /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. 
fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - Weight::from_ref_time(2_432_555_000 as u64) - // Standard Error: 28_964 - .saturating_add(Weight::from_ref_time(8_474_465 as u64).saturating_mul(n as u64)) - // Standard Error: 28_964 - .saturating_add(Weight::from_ref_time(333_758 as u64).saturating_mul(m as u64)) - // Standard Error: 28_964 - .saturating_add(Weight::from_ref_time(222_052 as u64).saturating_mul(a as u64)) + Weight::from_ref_time(55_419_000 as u64) + // Standard Error: 18_623 + .saturating_add(Weight::from_ref_time(12_843_237 as u64).saturating_mul(n as u64)) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(315_839 as u64).saturating_mul(m as u64)) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(217_497 as u64).saturating_mul(a as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(2004 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(5 as u64)) + .saturating_add(T::DbWeight::get().writes((5 as u64).saturating_mul(n as u64))) } // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts CollectionMaxSupply (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:0 w:1) // Storage: Nfts Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(43_755_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + Weight::from_ref_time(47_947_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:0 w:1) // Storage: Nfts Account (r:0 w:1) // Storage: Nfts ItemPriceOf (r:0 w:1) // Storage: Nfts PendingSwapOf (r:0 
w:1) fn burn() -> Weight { - Weight::from_ref_time(46_768_000 as u64) + Weight::from_ref_time(47_193_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Account (r:0 w:2) // Storage: Nfts ItemPriceOf (r:0 w:1) // Storage: Nfts PendingSwapOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(36_282_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + Weight::from_ref_time(42_305_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts Asset (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - Weight::from_ref_time(23_359_000 as u64) - // Standard Error: 9_645 - .saturating_add(Weight::from_ref_time(10_822_144 as u64).saturating_mul(i as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) + Weight::from_ref_time(26_327_000 as u64) + // Standard Error: 10_090 + .saturating_add(Weight::from_ref_time(10_876_864 as u64).saturating_mul(i as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } - // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:0) - fn freeze() -> Weight { - Weight::from_ref_time(27_805_000 as u64) + // Storage: Nfts ItemConfigOf (r:1 w:1) + fn lock_item_transfer() -> Weight { + Weight::from_ref_time(28_194_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) 
.saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:0) - fn thaw() -> Weight { - Weight::from_ref_time(27_712_000 as u64) + // Storage: Nfts ItemConfigOf (r:1 w:1) + fn unlock_item_transfer() -> Weight { + Weight::from_ref_time(28_821_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Nfts Class (r:1 w:1) - fn freeze_collection() -> Weight { - Weight::from_ref_time(23_068_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: Nfts Class (r:1 w:1) - fn thaw_collection() -> Weight { - Weight::from_ref_time(23_200_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:1) + fn lock_collection() -> Weight { + Weight::from_ref_time(25_896_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(31_800_000 as u64) + Weight::from_ref_time(32_728_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Nfts Class (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(23_959_000 as u64) + Weight::from_ref_time(24_805_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) - fn force_item_status() -> Weight { - Weight::from_ref_time(26_334_000 as u64) + // Storage: Nfts CollectionConfigOf (r:0 w:1) + fn force_collection_status() -> Weight { + Weight::from_ref_time(28_468_000 as u64) 
.saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) + } + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + fn lock_item_properties() -> Weight { + Weight::from_ref_time(27_377_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts InstanceMetadataOf (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(50_978_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) + Weight::from_ref_time(53_019_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts InstanceMetadataOf (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn clear_attribute() -> Weight { - Weight::from_ref_time(49_555_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) + Weight::from_ref_time(52_530_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - Weight::from_ref_time(41_099_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) + Weight::from_ref_time(48_054_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn 
clear_metadata() -> Weight { - Weight::from_ref_time(42_893_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) + Weight::from_ref_time(46_590_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } + // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(39_785_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) + Weight::from_ref_time(44_281_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(39_764_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) + Weight::from_ref_time(42_355_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) fn approve_transfer() -> Weight { - Weight::from_ref_time(29_577_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) + Weight::from_ref_time(33_170_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(29_696_000 as u64) + Weight::from_ref_time(31_121_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn clear_all_transfer_approvals() -> Weight { - Weight::from_ref_time(28_692_000 as u64) + Weight::from_ref_time(30_133_000 as u64) 
.saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(26_345_000 as u64) + Weight::from_ref_time(26_421_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts CollectionMaxSupply (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(24_826_000 as u64) + Weight::from_ref_time(26_358_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Asset (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(26_376_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) + Weight::from_ref_time(33_607_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts ItemPriceOf (r:1 w:1) // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Account (r:0 w:2) // Storage: Nfts PendingSwapOf (r:0 w:1) fn buy_item() -> Weight { - Weight::from_ref_time(49_140_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + Weight::from_ref_time(54_511_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } /// The range of component `n` is `[0, 10]`. 
fn pay_tips(n: u32, ) -> Weight { - Weight::from_ref_time(5_477_000 as u64) - // Standard Error: 33_188 - .saturating_add(Weight::from_ref_time(4_285_339 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(6_015_000 as u64) + // Standard Error: 34_307 + .saturating_add(Weight::from_ref_time(4_308_600 as u64).saturating_mul(n as u64)) } // Storage: Nfts Asset (r:2 w:0) // Storage: Nfts PendingSwapOf (r:0 w:1) @@ -342,225 +367,250 @@ impl WeightInfo for () { // Storage: Nfts NextCollectionId (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts CollectionConfigOf (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(38_062_000 as u64) + Weight::from_ref_time(39_252_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Nfts NextCollectionId (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts CollectionConfigOf (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(25_917_000 as u64) + Weight::from_ref_time(27_479_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts Asset (r:1 w:0) // Storage: Nfts ClassAccount (r:0 w:1) - // Storage: Nfts Attribute (r:0 w:1000) // Storage: Nfts ClassMetadataOf (r:0 w:1) - // Storage: Nfts InstanceMetadataOf (r:0 w:1000) + // Storage: Nfts CollectionConfigOf (r:0 w:1) // Storage: Nfts CollectionMaxSupply (r:0 w:1) + // Storage: Nfts Attribute (r:0 w:20) + // Storage: Nfts InstanceMetadataOf (r:0 w:20) + // Storage: Nfts ItemConfigOf (r:0 w:20) // Storage: Nfts Account (r:0 w:20) /// The range of component `n` is `[0, 1000]`. /// The range of component `m` is `[0, 1000]`. 
/// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - Weight::from_ref_time(2_432_555_000 as u64) - // Standard Error: 28_964 - .saturating_add(Weight::from_ref_time(8_474_465 as u64).saturating_mul(n as u64)) - // Standard Error: 28_964 - .saturating_add(Weight::from_ref_time(333_758 as u64).saturating_mul(m as u64)) - // Standard Error: 28_964 - .saturating_add(Weight::from_ref_time(222_052 as u64).saturating_mul(a as u64)) + Weight::from_ref_time(55_419_000 as u64) + // Standard Error: 18_623 + .saturating_add(Weight::from_ref_time(12_843_237 as u64).saturating_mul(n as u64)) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(315_839 as u64).saturating_mul(m as u64)) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(217_497 as u64).saturating_mul(a as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(2004 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) + .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(n as u64))) } // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts CollectionMaxSupply (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:0 w:1) // Storage: Nfts Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(43_755_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + Weight::from_ref_time(47_947_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:0 w:1) // Storage: Nfts Account (r:0 w:1) // 
Storage: Nfts ItemPriceOf (r:0 w:1) // Storage: Nfts PendingSwapOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(46_768_000 as u64) + Weight::from_ref_time(47_193_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Account (r:0 w:2) // Storage: Nfts ItemPriceOf (r:0 w:1) // Storage: Nfts PendingSwapOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(36_282_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + Weight::from_ref_time(42_305_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts Asset (r:102 w:102) /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { - Weight::from_ref_time(23_359_000 as u64) - // Standard Error: 9_645 - .saturating_add(Weight::from_ref_time(10_822_144 as u64).saturating_mul(i as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) + Weight::from_ref_time(26_327_000 as u64) + // Standard Error: 10_090 + .saturating_add(Weight::from_ref_time(10_876_864 as u64).saturating_mul(i as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } - // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:0) - fn freeze() -> Weight { - Weight::from_ref_time(27_805_000 as u64) + // Storage: Nfts ItemConfigOf (r:1 w:1) + fn lock_item_transfer() -> Weight { + Weight::from_ref_time(28_194_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts Class (r:1 w:0) - fn thaw() -> Weight { - Weight::from_ref_time(27_712_000 as u64) + // Storage: Nfts ItemConfigOf (r:1 w:1) + fn unlock_item_transfer() -> Weight { + Weight::from_ref_time(28_821_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Nfts Class (r:1 w:1) - fn freeze_collection() -> Weight { - Weight::from_ref_time(23_068_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - } - // Storage: Nfts Class (r:1 w:1) - fn thaw_collection() -> Weight { - Weight::from_ref_time(23_200_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:1) + fn lock_collection() -> Weight { + Weight::from_ref_time(25_896_000 as u64) + 
.saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(31_800_000 as u64) + Weight::from_ref_time(32_728_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Nfts Class (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(23_959_000 as u64) + Weight::from_ref_time(24_805_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) - fn force_item_status() -> Weight { - Weight::from_ref_time(26_334_000 as u64) + // Storage: Nfts CollectionConfigOf (r:0 w:1) + fn force_collection_status() -> Weight { + Weight::from_ref_time(28_468_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) + } + // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + fn lock_item_properties() -> Weight { + Weight::from_ref_time(27_377_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts InstanceMetadataOf (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(50_978_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) + Weight::from_ref_time(53_019_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts 
InstanceMetadataOf (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn clear_attribute() -> Weight { - Weight::from_ref_time(49_555_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) + Weight::from_ref_time(52_530_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - Weight::from_ref_time(41_099_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) + Weight::from_ref_time(48_054_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(42_893_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) + Weight::from_ref_time(46_590_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } + // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(39_785_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) + Weight::from_ref_time(44_281_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(39_764_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) + 
Weight::from_ref_time(42_355_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) fn approve_transfer() -> Weight { - Weight::from_ref_time(29_577_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) + Weight::from_ref_time(33_170_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(29_696_000 as u64) + Weight::from_ref_time(31_121_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Class (r:1 w:0) // Storage: Nfts Asset (r:1 w:1) fn clear_all_transfer_approvals() -> Weight { - Weight::from_ref_time(28_692_000 as u64) + Weight::from_ref_time(30_133_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(26_345_000 as u64) + Weight::from_ref_time(26_421_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts CollectionMaxSupply (r:1 w:1) // Storage: Nfts Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(24_826_000 as u64) + Weight::from_ref_time(26_358_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Asset (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(26_376_000 as u64) - 
.saturating_add(RocksDbWeight::get().reads(1 as u64)) + Weight::from_ref_time(33_607_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Nfts Asset (r:1 w:1) // Storage: Nfts ItemPriceOf (r:1 w:1) // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Account (r:0 w:2) // Storage: Nfts PendingSwapOf (r:0 w:1) fn buy_item() -> Weight { - Weight::from_ref_time(49_140_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + Weight::from_ref_time(54_511_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } /// The range of component `n` is `[0, 10]`. fn pay_tips(n: u32, ) -> Weight { diff --git a/frame/support/src/traits/tokens.rs b/frame/support/src/traits/tokens.rs index 77eb83adfbfb0..b3b3b4b7d90b1 100644 --- a/frame/support/src/traits/tokens.rs +++ b/frame/support/src/traits/tokens.rs @@ -23,7 +23,9 @@ pub mod fungibles; pub mod imbalance; mod misc; pub mod nonfungible; +pub mod nonfungible_v2; pub mod nonfungibles; +pub mod nonfungibles_v2; pub use imbalance::Imbalance; pub use misc::{ AssetId, Balance, BalanceConversion, BalanceStatus, DepositConsequence, ExistenceRequirement, diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs new file mode 100644 index 0000000000000..850195852cf72 --- /dev/null +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -0,0 +1,204 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for dealing with a single non-fungible collection of items. +//! +//! This assumes a single level namespace identified by `Inspect::ItemId`, and could +//! reasonably be implemented by pallets which wants to expose a single collection of NFT-like +//! objects. +//! +//! For an NFT API which has dual-level namespacing, the traits in `nonfungibles` are better to +//! use. + +use super::nonfungibles_v2 as nonfungibles; +use crate::{dispatch::DispatchResult, traits::Get}; +use codec::{Decode, Encode}; +use sp_runtime::TokenError; +use sp_std::prelude::*; + +/// Trait for providing an interface to a read-only NFT-like set of items. +pub trait Inspect { + /// Type for identifying an item. + type ItemId; + + /// Returns the owner of `item`, or `None` if the item doesn't exist or has no + /// owner. + fn owner(item: &Self::ItemId) -> Option; + + /// Returns the attribute value of `item` corresponding to `key`. + /// + /// By default this is `None`; no attributes are defined. + fn attribute(_item: &Self::ItemId, _key: &[u8]) -> Option> { + None + } + + /// Returns the strongly-typed attribute value of `item` corresponding to `key`. + /// + /// By default this just attempts to use `attribute`. + fn typed_attribute(item: &Self::ItemId, key: &K) -> Option { + key.using_encoded(|d| Self::attribute(item, d)) + .and_then(|v| V::decode(&mut &v[..]).ok()) + } + + /// Returns `true` if the `item` may be transferred. + /// + /// Default implementation is that all items are transferable. 
+ fn can_transfer(_item: &Self::ItemId) -> bool { + true + } +} + +/// Interface for enumerating items in existence or owned by a given account over a collection +/// of NFTs. +pub trait InspectEnumerable: Inspect { + /// Returns an iterator of the items within a `collection` in existence. + fn items() -> Box>; + + /// Returns an iterator of the items of all collections owned by `who`. + fn owned(who: &AccountId) -> Box>; +} + +/// Trait for providing an interface for NFT-like items which may be minted, burned and/or have +/// attributes set on them. +pub trait Mutate: Inspect { + /// Mint some `item` to be owned by `who`. + /// + /// By default, this is not a supported operation. + fn mint_into(_item: &Self::ItemId, _who: &AccountId, _config: &ItemConfig) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Burn some `item`. + /// + /// By default, this is not a supported operation. + fn burn(_item: &Self::ItemId, _maybe_check_owner: Option<&AccountId>) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Set attribute `value` of `item`'s `key`. + /// + /// By default, this is not a supported operation. + fn set_attribute(_item: &Self::ItemId, _key: &[u8], _value: &[u8]) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Attempt to set the strongly-typed attribute `value` of `item`'s `key`. + /// + /// By default this just attempts to use `set_attribute`. + fn set_typed_attribute( + item: &Self::ItemId, + key: &K, + value: &V, + ) -> DispatchResult { + key.using_encoded(|k| value.using_encoded(|v| Self::set_attribute(item, k, v))) + } +} + +/// Trait for providing a non-fungible set of items which can only be transferred. +pub trait Transfer: Inspect { + /// Transfer `item` into `destination` account. + fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult; +} + +/// Convert a `fungibles` trait implementation into a `fungible` trait implementation by identifying +/// a single item. 
+pub struct ItemOf< + F: nonfungibles::Inspect, + A: Get<>::CollectionId>, + AccountId, +>(sp_std::marker::PhantomData<(F, A, AccountId)>); + +impl< + F: nonfungibles::Inspect, + A: Get<>::CollectionId>, + AccountId, + > Inspect for ItemOf +{ + type ItemId = >::ItemId; + fn owner(item: &Self::ItemId) -> Option { + >::owner(&A::get(), item) + } + fn attribute(item: &Self::ItemId, key: &[u8]) -> Option> { + >::attribute(&A::get(), item, key) + } + fn typed_attribute(item: &Self::ItemId, key: &K) -> Option { + >::typed_attribute(&A::get(), item, key) + } + fn can_transfer(item: &Self::ItemId) -> bool { + >::can_transfer(&A::get(), item) + } +} + +impl< + F: nonfungibles::InspectEnumerable, + A: Get<>::CollectionId>, + AccountId, + > InspectEnumerable for ItemOf +{ + fn items() -> Box> { + >::items(&A::get()) + } + fn owned(who: &AccountId) -> Box> { + >::owned_in_collection(&A::get(), who) + } +} + +impl< + F: nonfungibles::Mutate, + A: Get<>::CollectionId>, + AccountId, + ItemConfig, + > Mutate for ItemOf +{ + fn mint_into(item: &Self::ItemId, who: &AccountId, config: &ItemConfig) -> DispatchResult { + >::mint_into(&A::get(), item, who, config) + } + fn burn(item: &Self::ItemId, maybe_check_owner: Option<&AccountId>) -> DispatchResult { + >::burn(&A::get(), item, maybe_check_owner) + } + fn set_attribute(item: &Self::ItemId, key: &[u8], value: &[u8]) -> DispatchResult { + >::set_attribute( + &A::get(), + item, + key, + value, + ) + } + fn set_typed_attribute( + item: &Self::ItemId, + key: &K, + value: &V, + ) -> DispatchResult { + >::set_typed_attribute( + &A::get(), + item, + key, + value, + ) + } +} + +impl< + F: nonfungibles::Transfer, + A: Get<>::CollectionId>, + AccountId, + > Transfer for ItemOf +{ + fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult { + >::transfer(&A::get(), item, destination) + } +} diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs new file mode 
100644 index 0000000000000..d23e6d67573c7 --- /dev/null +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -0,0 +1,243 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for dealing with multiple collections of non-fungible items. +//! +//! This assumes a dual-level namespace identified by `Inspect::ItemId`, and could +//! reasonably be implemented by pallets which want to expose multiple independent collections of +//! NFT-like objects. +//! +//! For an NFT API which has single-level namespacing, the traits in `nonfungible` are better to +//! use. +//! +//! Implementations of these traits may be converted to implementations of corresponding +//! `nonfungible` traits by using the `nonfungible::ItemOf` type adapter. + +use crate::dispatch::{DispatchError, DispatchResult}; +use codec::{Decode, Encode}; +use sp_runtime::TokenError; +use sp_std::prelude::*; + +/// Trait for providing an interface to many read-only NFT-like sets of items. +pub trait Inspect { + /// Type for identifying an item. + type ItemId; + + /// Type for identifying a collection (an identifier for an independent collection of + /// items). + type CollectionId; + + /// Returns the owner of `item` of `collection`, or `None` if the item doesn't exist + /// (or somehow has no owner). 
+ fn owner(collection: &Self::CollectionId, item: &Self::ItemId) -> Option; + + /// Returns the owner of the `collection`, if there is one. For many NFTs this may not + /// make any sense, so users of this API should not be surprised to find a collection + /// results in `None` here. + fn collection_owner(_collection: &Self::CollectionId) -> Option { + None + } + + /// Returns the attribute value of `item` of `collection` corresponding to `key`. + /// + /// By default this is `None`; no attributes are defined. + fn attribute( + _collection: &Self::CollectionId, + _item: &Self::ItemId, + _key: &[u8], + ) -> Option> { + None + } + + /// Returns the strongly-typed attribute value of `item` of `collection` corresponding to + /// `key`. + /// + /// By default this just attempts to use `attribute`. + fn typed_attribute( + collection: &Self::CollectionId, + item: &Self::ItemId, + key: &K, + ) -> Option { + key.using_encoded(|d| Self::attribute(collection, item, d)) + .and_then(|v| V::decode(&mut &v[..]).ok()) + } + + /// Returns the attribute value of `collection` corresponding to `key`. + /// + /// By default this is `None`; no attributes are defined. + fn collection_attribute(_collection: &Self::CollectionId, _key: &[u8]) -> Option> { + None + } + + /// Returns the strongly-typed attribute value of `collection` corresponding to `key`. + /// + /// By default this just attempts to use `collection_attribute`. + fn typed_collection_attribute( + collection: &Self::CollectionId, + key: &K, + ) -> Option { + key.using_encoded(|d| Self::collection_attribute(collection, d)) + .and_then(|v| V::decode(&mut &v[..]).ok()) + } + + /// Returns `true` if the `item` of `collection` may be transferred. + /// + /// Default implementation is that all items are transferable. + fn can_transfer(_collection: &Self::CollectionId, _item: &Self::ItemId) -> bool { + true + } +} + +/// Interface for enumerating items in existence or owned by a given account over many collections +/// of NFTs. 
+pub trait InspectEnumerable: Inspect { + /// Returns an iterator of the collections in existence. + fn collections() -> Box>; + + /// Returns an iterator of the items of a `collection` in existence. + fn items(collection: &Self::CollectionId) -> Box>; + + /// Returns an iterator of the items of all collections owned by `who`. + fn owned(who: &AccountId) -> Box>; + + /// Returns an iterator of the items of `collection` owned by `who`. + fn owned_in_collection( + collection: &Self::CollectionId, + who: &AccountId, + ) -> Box>; +} + +/// Trait for providing the ability to create collections of nonfungible items. +pub trait Create: Inspect { + /// Create a `collection` of nonfungible items to be owned by `who` and managed by `admin`. + fn create_collection( + collection: &Self::CollectionId, + who: &AccountId, + admin: &AccountId, + config: &CollectionConfig, + ) -> DispatchResult; +} + +/// Trait for providing the ability to destroy collections of nonfungible items. +pub trait Destroy: Inspect { + /// The witness data needed to destroy an item. + type DestroyWitness; + + /// Provide the appropriate witness data needed to destroy an item. + fn get_destroy_witness(collection: &Self::CollectionId) -> Option; + + /// Destroy an existing fungible item. + /// * `collection`: The `CollectionId` to be destroyed. + /// * `witness`: Any witness data that needs to be provided to complete the operation + /// successfully. + /// * `maybe_check_owner`: An optional account id that can be used to authorize the destroy + /// command. If not provided, we will not do any authorization checks before destroying the + /// item. + /// + /// If successful, this function will return the actual witness data from the destroyed item. + /// This may be different than the witness data provided, and can be used to refund weight. 
+ fn destroy( + collection: Self::CollectionId, + witness: Self::DestroyWitness, + maybe_check_owner: Option, + ) -> Result; +} + +/// Trait for providing an interface for multiple collections of NFT-like items which may be +/// minted, burned and/or have attributes set on them. +pub trait Mutate: Inspect { + /// Mint some `item` of `collection` to be owned by `who`. + /// + /// By default, this is not a supported operation. + fn mint_into( + _collection: &Self::CollectionId, + _item: &Self::ItemId, + _who: &AccountId, + _config: &ItemConfig, + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Burn some `item` of `collection`. + /// + /// By default, this is not a supported operation. + fn burn( + _collection: &Self::CollectionId, + _item: &Self::ItemId, + _maybe_check_owner: Option<&AccountId>, + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Set attribute `value` of `item` of `collection`'s `key`. + /// + /// By default, this is not a supported operation. + fn set_attribute( + _collection: &Self::CollectionId, + _item: &Self::ItemId, + _key: &[u8], + _value: &[u8], + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Attempt to set the strongly-typed attribute `value` of `item` of `collection`'s `key`. + /// + /// By default this just attempts to use `set_attribute`. + fn set_typed_attribute( + collection: &Self::CollectionId, + item: &Self::ItemId, + key: &K, + value: &V, + ) -> DispatchResult { + key.using_encoded(|k| value.using_encoded(|v| Self::set_attribute(collection, item, k, v))) + } + + /// Set attribute `value` of `collection`'s `key`. + /// + /// By default, this is not a supported operation. + fn set_collection_attribute( + _collection: &Self::CollectionId, + _key: &[u8], + _value: &[u8], + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Attempt to set the strongly-typed attribute `value` of `collection`'s `key`. 
+ /// + /// By default this just attempts to use `set_attribute`. + fn set_typed_collection_attribute( + collection: &Self::CollectionId, + key: &K, + value: &V, + ) -> DispatchResult { + key.using_encoded(|k| { + value.using_encoded(|v| Self::set_collection_attribute(collection, k, v)) + }) + } +} + +/// Trait for providing a non-fungible sets of items which can only be transferred. +pub trait Transfer: Inspect { + /// Transfer `item` of `collection` into `destination` account. + fn transfer( + collection: &Self::CollectionId, + item: &Self::ItemId, + destination: &AccountId, + ) -> DispatchResult; +} From 6763dd6124882843e488c905dc0ab6b030670c4d Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Thu, 20 Oct 2022 11:32:52 +0300 Subject: [PATCH 015/101] [Uniques V2] Refactor roles (#12437) * Basics * WIP: change the data format * Refactor * Remove redundant new() method * Rename settings * Enable tests * Chore * Change params order * Delete the config on collection removal * Chore * Remove redundant system features * Rename force_item_status to force_collection_status * Update node runtime * Chore * Remove thaw_collection * Chore * Connect collection.is_frozen to config * Allow to lock the collection in a new way * Move free_holding into settings * Connect collection's metadata locker to feature flags * DRY * Chore * Connect pallet level feature flags * Prepare tests for the new changes * Implement Item settings * Allow to lock the metadata or attributes of an item * Common -> Settings * Extract settings related code to a separate file * Move feature flag checks inside the do_* methods * Split settings.rs into parts * Extract repeated code into macro * Extract macros into their own file * Chore * Fix traits * Fix traits * Test SystemFeatures * Fix benchmarks * Add missing benchmark * Fix node/runtime/lib.rs * ".git/.scripts/bench-bot.sh" pallet dev pallet_nfts * Keep item's config on burn if it's not empty * Fix the 
merge artifacts * Fmt * Add SystemFeature::NoSwaps check * Refactor roles structure * Rename SystemFeatures to PalletFeatures * Rename errors * Add docs * Change error message * Rework pallet features * Move macros * Change comments * Fmt * Refactor Incrementable * Use pub(crate) for do_* functions * Update comments * Refactor freeze and lock functions * Rework Collection config and Item confg api * Chore * Make clippy happy * Chore * Fix artifacts * Address comments * Further refactoring * Add comments * Add tests for group_roles_by_account() * Update frame/nfts/src/impl_nonfungibles.rs * Add test * Replace Itertools group_by with a custom implementation * ItemsNotTransferable => ItemsNonTransferable * Update frame/nfts/src/features/roles.rs Co-authored-by: Muharem Ismailov * Address PR comments * Add missed comment Co-authored-by: command-bot <> Co-authored-by: Muharem Ismailov --- frame/nfts/src/benchmarking.rs | 2 +- frame/nfts/src/features/lock.rs | 22 +++--- frame/nfts/src/features/mod.rs | 1 + frame/nfts/src/features/roles.rs | 69 ++++++++++++++++++ frame/nfts/src/functions.rs | 22 +++--- frame/nfts/src/impl_nonfungibles.rs | 4 +- frame/nfts/src/lib.rs | 97 ++++++++++++++++++-------- frame/nfts/src/tests.rs | 104 +++++++++++++++++++++++----- frame/nfts/src/types.rs | 44 +++++++++--- 9 files changed, 291 insertions(+), 74 deletions(-) create mode 100644 frame/nfts/src/features/roles.rs diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index c65430fd35108..a5a264c40a715 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -68,7 +68,7 @@ fn add_collection_metadata, I: 'static>() -> (T::AccountId, Account fn mint_item, I: 'static>( index: u16, ) -> (T::ItemId, T::AccountId, AccountIdLookupOf) { - let caller = Collection::::get(T::Helper::collection(0)).unwrap().admin; + let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); 
} diff --git a/frame/nfts/src/features/lock.rs b/frame/nfts/src/features/lock.rs index 0a5fecc1d6224..50420d8e3de87 100644 --- a/frame/nfts/src/features/lock.rs +++ b/frame/nfts/src/features/lock.rs @@ -24,10 +24,10 @@ impl, I: 'static> Pallet { collection: T::CollectionId, lock_config: CollectionConfig, ) -> DispatchResult { - let details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - ensure!(origin == details.freezer, Error::::NoPermission); - + ensure!( + Self::has_role(&collection, &origin, CollectionRole::Freezer), + Error::::NoPermission + ); CollectionConfigOf::::try_mutate(collection, |maybe_config| { let config = maybe_config.as_mut().ok_or(Error::::NoConfig)?; @@ -51,9 +51,10 @@ impl, I: 'static> Pallet { collection: T::CollectionId, item: T::ItemId, ) -> DispatchResult { - let collection_details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - ensure!(collection_details.freezer == origin, Error::::NoPermission); + ensure!( + Self::has_role(&collection, &origin, CollectionRole::Freezer), + Error::::NoPermission + ); let mut config = Self::get_item_config(&collection, &item)?; if !config.has_disabled_setting(ItemSetting::Transferable) { @@ -70,9 +71,10 @@ impl, I: 'static> Pallet { collection: T::CollectionId, item: T::ItemId, ) -> DispatchResult { - let collection_details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - ensure!(collection_details.freezer == origin, Error::::NoPermission); + ensure!( + Self::has_role(&collection, &origin, CollectionRole::Freezer), + Error::::NoPermission + ); let mut config = Self::get_item_config(&collection, &item)?; if config.has_disabled_setting(ItemSetting::Transferable) { diff --git a/frame/nfts/src/features/mod.rs b/frame/nfts/src/features/mod.rs index 47e5816bc953c..f814d696d774b 100644 --- a/frame/nfts/src/features/mod.rs +++ b/frame/nfts/src/features/mod.rs @@ -18,4 +18,5 @@ pub mod atomic_swap; pub mod buy_sell; pub mod lock; +pub mod 
roles; pub mod settings; diff --git a/frame/nfts/src/features/roles.rs b/frame/nfts/src/features/roles.rs new file mode 100644 index 0000000000000..e961779725b6e --- /dev/null +++ b/frame/nfts/src/features/roles.rs @@ -0,0 +1,69 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use frame_support::pallet_prelude::*; +use sp_std::collections::btree_map::BTreeMap; + +impl, I: 'static> Pallet { + /// Clears all the roles in a specified collection. + /// + /// - `collection_id`: A collection to clear the roles in. + /// + /// Throws an error if some of the roles were left in storage. + /// This means the `CollectionRoles::max_roles()` needs to be adjusted. + pub(crate) fn clear_roles(collection_id: &T::CollectionId) -> Result<(), DispatchError> { + let res = CollectionRoleOf::::clear_prefix( + &collection_id, + CollectionRoles::max_roles() as u32, + None, + ); + ensure!(res.maybe_cursor.is_none(), Error::::RolesNotCleared); + Ok(()) + } + + /// Returns true if a specified account has a provided role within that collection. + /// + /// - `collection_id`: A collection to check the role in. + /// - `account_id`: An account to check the role for. + /// - `role`: A role to validate. + /// + /// Returns boolean. 
+ pub(crate) fn has_role( + collection_id: &T::CollectionId, + account_id: &T::AccountId, + role: CollectionRole, + ) -> bool { + CollectionRoleOf::::get(&collection_id, &account_id) + .map_or(false, |roles| roles.has_role(role)) + } + + /// Groups provided roles by account, given one account could have multiple roles. + /// + /// - `input`: A vector of (Account, Role) tuples. + /// + /// Returns a grouped vector. + pub fn group_roles_by_account( + input: Vec<(T::AccountId, CollectionRole)>, + ) -> Vec<(T::AccountId, CollectionRoles)> { + let mut result = BTreeMap::new(); + for (account, role) in input.into_iter() { + result.entry(account).or_insert(CollectionRoles::none()).add_role(role); + } + result.into_iter().collect() + } +} diff --git a/frame/nfts/src/functions.rs b/frame/nfts/src/functions.rs index 275e3668d7a20..90a701bc9eaa0 100644 --- a/frame/nfts/src/functions.rs +++ b/frame/nfts/src/functions.rs @@ -41,7 +41,7 @@ impl, I: 'static> Pallet { let collection_config = Self::get_collection_config(&collection)?; ensure!( collection_config.is_setting_enabled(CollectionSetting::TransferableItems), - Error::::ItemsNotTransferable + Error::::ItemsNonTransferable ); let item_config = Self::get_item_config(&collection, &item)?; @@ -93,15 +93,19 @@ impl, I: 'static> Pallet { collection, CollectionDetails { owner: owner.clone(), - issuer: admin.clone(), - admin: admin.clone(), - freezer: admin, total_deposit: deposit, items: 0, item_metadatas: 0, attributes: 0, }, ); + CollectionRoleOf::::insert( + collection, + admin, + CollectionRoles( + CollectionRole::Admin | CollectionRole::Freezer | CollectionRole::Issuer, + ), + ); let next_id = collection.increment(); @@ -142,6 +146,7 @@ impl, I: 'static> Pallet { #[allow(deprecated)] PendingSwapOf::::remove_prefix(&collection, None); CollectionMetadataOf::::remove(&collection); + Self::clear_roles(&collection)?; #[allow(deprecated)] Attribute::::remove_prefix((&collection,), None); 
CollectionAccount::::remove(&collection_details.owner, &collection); @@ -165,7 +170,6 @@ impl, I: 'static> Pallet { item: T::ItemId, owner: T::AccountId, config: ItemConfig, - with_details: impl FnOnce(&CollectionDetailsFor) -> DispatchResult, ) -> DispatchResult { ensure!(!Item::::contains_key(collection, item), Error::::AlreadyExists); @@ -175,8 +179,6 @@ impl, I: 'static> Pallet { let collection_details = maybe_collection_details.as_mut().ok_or(Error::::UnknownCollection)?; - with_details(collection_details)?; - if let Ok(max_supply) = CollectionMaxSupply::::try_get(&collection) { ensure!(collection_details.items < max_supply, Error::::MaxSupplyReached); } @@ -218,7 +220,7 @@ impl, I: 'static> Pallet { pub fn do_burn( collection: T::CollectionId, item: T::ItemId, - with_details: impl FnOnce(&CollectionDetailsFor, &ItemDetailsFor) -> DispatchResult, + with_details: impl FnOnce(&ItemDetailsFor) -> DispatchResult, ) -> DispatchResult { let owner = Collection::::try_mutate( &collection, @@ -227,7 +229,7 @@ impl, I: 'static> Pallet { maybe_collection_details.as_mut().ok_or(Error::::UnknownCollection)?; let details = Item::::get(&collection, &item) .ok_or(Error::::UnknownCollection)?; - with_details(collection_details, &details)?; + with_details(&details)?; // Return the deposit. 
T::Currency::unreserve(&collection_details.owner, details.deposit); @@ -271,7 +273,7 @@ impl, I: 'static> Pallet { let collection_config = Self::get_collection_config(&collection)?; ensure!( collection_config.is_setting_enabled(CollectionSetting::TransferableItems), - Error::::ItemsNotTransferable + Error::::ItemsNonTransferable ); let item_config = Self::get_item_config(&collection, &item)?; diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index 8a7c79fc0c14f..210fe4831991d 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -143,7 +143,7 @@ impl, I: 'static> Mutate<::AccountId, ItemSettin who: &T::AccountId, settings: &ItemSettings, ) -> DispatchResult { - Self::do_mint(*collection, *item, who.clone(), ItemConfig(*settings), |_| Ok(())) + Self::do_mint(*collection, *item, who.clone(), ItemConfig(*settings)) } fn burn( @@ -151,7 +151,7 @@ impl, I: 'static> Mutate<::AccountId, ItemSettin item: &Self::ItemId, maybe_check_owner: Option<&T::AccountId>, ) -> DispatchResult { - Self::do_burn(*collection, *item, |_, d| { + Self::do_burn(*collection, *item, |d| { if let Some(check_owner) = maybe_check_owner { if &d.owner != check_owner { return Err(Error::::NoPermission.into()) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index bfba0c1ea3330..8b8b21f944f3c 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -219,6 +219,19 @@ pub mod pallet { OptionQuery, >; + /// The items in existence and their ownership details. + #[pallet::storage] + /// Stores collection roles as per account. + pub(super) type CollectionRoleOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::AccountId, + CollectionRoles, + OptionQuery, + >; + /// The items in existence and their ownership details. #[pallet::storage] #[pallet::storage_prefix = "Asset"] @@ -497,7 +510,7 @@ pub mod pallet { /// Collection ID is already taken. 
CollectionIdInUse, /// Items within that collection are non-transferable. - ItemsNotTransferable, + ItemsNonTransferable, /// The provided account is not a delegate. NotDelegate, /// The delegate turned out to be different to what was expected. @@ -544,6 +557,8 @@ pub mod pallet { InconsistentItemConfig, /// Config for a collection or an item can't be found. NoConfig, + /// Some roles were not cleared. + RolesNotCleared, } impl, I: 'static> Pallet { @@ -702,10 +717,11 @@ pub mod pallet { let origin = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; - Self::do_mint(collection, item, owner, config, |collection_details| { - ensure!(collection_details.issuer == origin, Error::::NoPermission); - Ok(()) - }) + ensure!( + Self::has_role(&collection, &origin, CollectionRole::Issuer), + Error::::NoPermission + ); + Self::do_mint(collection, item, owner, config) } /// Destroy a single item. @@ -731,8 +747,9 @@ pub mod pallet { let origin = ensure_signed(origin)?; let check_owner = check_owner.map(T::Lookup::lookup).transpose()?; - Self::do_burn(collection, item, |collection_details, details| { - let is_permitted = collection_details.admin == origin || details.owner == origin; + Self::do_burn(collection, item, |details| { + let is_admin = Self::has_role(&collection, &origin, CollectionRole::Admin); + let is_permitted = is_admin || details.owner == origin; ensure!(is_permitted, Error::::NoPermission); ensure!( check_owner.map_or(true, |o| o == details.owner), @@ -767,8 +784,9 @@ pub mod pallet { let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; - Self::do_transfer(collection, item, dest, |collection_details, details| { - if details.owner != origin && collection_details.admin != origin { + Self::do_transfer(collection, item, dest, |_, details| { + let is_admin = Self::has_role(&collection, &origin, CollectionRole::Admin); + if details.owner != origin && !is_admin { let deadline = details.approvals.get(&origin).ok_or(Error::::NoPermission)?; 
if let Some(d) = deadline { @@ -986,9 +1004,17 @@ pub mod pallet { let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; ensure!(origin == details.owner, Error::::NoPermission); - details.issuer = issuer.clone(); - details.admin = admin.clone(); - details.freezer = freezer.clone(); + // delete previous values + Self::clear_roles(&collection)?; + + let account_to_role = Self::group_roles_by_account(vec![ + (issuer.clone(), CollectionRole::Issuer), + (admin.clone(), CollectionRole::Admin), + (freezer.clone(), CollectionRole::Freezer), + ]); + for (account, roles) in account_to_role { + CollectionRoleOf::::insert(&collection, &account, roles); + } Self::deposit_event(Event::TeamChanged { collection, issuer, admin, freezer }); Ok(()) @@ -1026,19 +1052,24 @@ pub mod pallet { let delegate = T::Lookup::lookup(delegate)?; - let collection_details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; let mut details = - Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; + Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config.is_setting_enabled(CollectionSetting::TransferableItems), + Error::::ItemsNonTransferable + ); let collection_config = Self::get_collection_config(&collection)?; ensure!( collection_config.is_setting_enabled(CollectionSetting::TransferableItems), - Error::::ItemsNotTransferable + Error::::ItemsNonTransferable ); if let Some(check) = maybe_check { - let permitted = check == collection_details.admin || check == details.owner; + let is_admin = Self::has_role(&collection, &check, CollectionRole::Admin); + let permitted = is_admin || check == details.owner; ensure!(permitted, Error::::NoPermission); } @@ -1090,10 +1121,8 @@ pub mod pallet { let delegate = T::Lookup::lookup(delegate)?; - let collection_details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; let mut details = - 
Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; + Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; let maybe_deadline = details.approvals.get(&delegate).ok_or(Error::::NotDelegate)?; @@ -1107,7 +1136,8 @@ pub mod pallet { if !is_past_deadline { if let Some(check) = maybe_check { - let permitted = check == collection_details.admin || check == details.owner; + let is_admin = Self::has_role(&collection, &check, CollectionRole::Admin); + let permitted = is_admin || check == details.owner; ensure!(permitted, Error::::NoPermission); } } @@ -1148,12 +1178,12 @@ pub mod pallet { .map(|_| None) .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; - let collection_details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; let mut details = Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; + if let Some(check) = maybe_check { - let permitted = check == collection_details.admin || check == details.owner; + let is_admin = Self::has_role(&collection, &check, CollectionRole::Admin); + let permitted = is_admin || check == details.owner; ensure!(permitted, Error::::NoPermission); } @@ -1200,14 +1230,27 @@ pub mod pallet { let old_owner = collection_info.owner; let new_owner = T::Lookup::lookup(owner)?; collection_info.owner = new_owner.clone(); - collection_info.issuer = T::Lookup::lookup(issuer)?; - collection_info.admin = T::Lookup::lookup(admin)?; - collection_info.freezer = T::Lookup::lookup(freezer)?; *maybe_collection = Some(collection_info); CollectionAccount::::remove(&old_owner, &collection); CollectionAccount::::insert(&new_owner, &collection, ()); CollectionConfigOf::::insert(&collection, config); + let issuer = T::Lookup::lookup(issuer)?; + let admin = T::Lookup::lookup(admin)?; + let freezer = T::Lookup::lookup(freezer)?; + + // delete previous values + Self::clear_roles(&collection)?; + + let account_to_role = Self::group_roles_by_account(vec![ + (issuer, 
CollectionRole::Issuer), + (admin, CollectionRole::Admin), + (freezer, CollectionRole::Freezer), + ]); + for (account, roles) in account_to_role { + CollectionRoleOf::::insert(&collection, &account, roles); + } + Self::deposit_event(Event::CollectionStatusChanged { collection }); Ok(()) }) diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 1b60fd6431b19..d0841ebc1f238 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -21,7 +21,7 @@ use crate::{mock::*, Event, *}; use frame_support::{ assert_noop, assert_ok, dispatch::Dispatchable, - traits::{Currency, Get}, + traits::{tokens::nonfungibles_v2::Destroy, Currency, Get}, }; use pallet_balances::Error as BalancesError; use sp_std::prelude::*; @@ -152,7 +152,7 @@ fn lifecycle_should_work() { assert_eq!(Balances::reserved_balance(&1), 13); assert!(ItemMetadataOf::::contains_key(0, 69)); - let w = Collection::::get(0).unwrap().destroy_witness(); + let w = Nfts::get_destroy_witness(&0).unwrap(); assert_eq!(w.items, 2); assert_eq!(w.item_metadatas, 2); assert_ok!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w)); @@ -227,7 +227,7 @@ fn transfer_should_work() { assert_noop!( Nfts::transfer(RuntimeOrigin::signed(1), collection_id, 42, 3,), - Error::::ItemsNotTransferable + Error::::ItemsNonTransferable ); }); } @@ -248,7 +248,7 @@ fn locking_transfer_should_work() { )); assert_noop!( Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2), - Error::::ItemsNotTransferable + Error::::ItemsNonTransferable ); assert_ok!(Nfts::force_collection_status( @@ -296,7 +296,7 @@ fn origin_guards_should_work() { Nfts::burn(RuntimeOrigin::signed(2), 0, 42, None), Error::::NoPermission ); - let w = Collection::::get(0).unwrap().destroy_witness(); + let w = Nfts::get_destroy_witness(&0).unwrap(); assert_noop!(Nfts::destroy(RuntimeOrigin::signed(2), 0, w), Error::::NoPermission); }); } @@ -550,7 +550,7 @@ fn set_attribute_should_work() { ); assert_eq!(Balances::reserved_balance(1), 16); - let w = 
Collection::::get(0).unwrap().destroy_witness(); + let w = Nfts::get_destroy_witness(&0).unwrap(); assert_ok!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w)); assert_eq!(attributes(0), vec![]); assert_eq!(Balances::reserved_balance(1), 0); @@ -687,6 +687,48 @@ fn force_collection_status_should_work() { assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0; 20])); assert_eq!(Balances::reserved_balance(1), 0); + + // validate new roles + assert_ok!(Nfts::force_collection_status( + RuntimeOrigin::root(), + 0, + 1, + 2, + 3, + 4, + CollectionConfig::all_settings_enabled(), + )); + assert_eq!( + CollectionRoleOf::::get(0, 2).unwrap(), + CollectionRoles(CollectionRole::Issuer.into()) + ); + assert_eq!( + CollectionRoleOf::::get(0, 3).unwrap(), + CollectionRoles(CollectionRole::Admin.into()) + ); + assert_eq!( + CollectionRoleOf::::get(0, 4).unwrap(), + CollectionRoles(CollectionRole::Freezer.into()) + ); + + assert_ok!(Nfts::force_collection_status( + RuntimeOrigin::root(), + 0, + 1, + 3, + 2, + 3, + CollectionConfig::all_settings_enabled(), + )); + + assert_eq!( + CollectionRoleOf::::get(0, 2).unwrap(), + CollectionRoles(CollectionRole::Admin.into()) + ); + assert_eq!( + CollectionRoleOf::::get(0, 3).unwrap(), + CollectionRoles(CollectionRole::Issuer | CollectionRole::Freezer) + ); }); } @@ -761,7 +803,7 @@ fn approval_lifecycle_works() { assert_noop!( Nfts::approve_transfer(RuntimeOrigin::signed(1), collection_id, 1, 2, None), - Error::::ItemsNotTransferable + Error::::ItemsNonTransferable ); }); } @@ -775,11 +817,11 @@ fn cancel_approval_works() { assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_noop!( Nfts::cancel_approval(RuntimeOrigin::signed(2), 1, 42, 3), - Error::::UnknownCollection + Error::::UnknownItem ); assert_noop!( Nfts::cancel_approval(RuntimeOrigin::signed(2), 0, 43, 3), - Error::::UnknownCollection + Error::::UnknownItem ); assert_noop!( Nfts::cancel_approval(RuntimeOrigin::signed(3), 0, 42, 
3), @@ -898,11 +940,11 @@ fn cancel_approval_works_with_admin() { assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_noop!( Nfts::cancel_approval(RuntimeOrigin::signed(1), 1, 42, 1), - Error::::UnknownCollection + Error::::UnknownItem ); assert_noop!( Nfts::cancel_approval(RuntimeOrigin::signed(1), 0, 43, 1), - Error::::UnknownCollection + Error::::UnknownItem ); assert_noop!( Nfts::cancel_approval(RuntimeOrigin::signed(1), 0, 42, 4), @@ -926,11 +968,11 @@ fn cancel_approval_works_with_force() { assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_noop!( Nfts::cancel_approval(RuntimeOrigin::root(), 1, 42, 1), - Error::::UnknownCollection + Error::::UnknownItem ); assert_noop!( Nfts::cancel_approval(RuntimeOrigin::root(), 0, 43, 1), - Error::::UnknownCollection + Error::::UnknownItem ); assert_noop!( Nfts::cancel_approval(RuntimeOrigin::root(), 0, 42, 4), @@ -1041,7 +1083,7 @@ fn max_supply_should_work() { assert_ok!(Nfts::destroy( RuntimeOrigin::signed(user_id), collection_id, - Collection::::get(collection_id).unwrap().destroy_witness() + Nfts::get_destroy_witness(&collection_id).unwrap() )); assert!(!CollectionMaxSupply::::contains_key(collection_id)); }); @@ -1137,7 +1179,7 @@ fn set_price_should_work() { assert_noop!( Nfts::set_price(RuntimeOrigin::signed(user_id), collection_id, item_1, Some(2), None), - Error::::ItemsNotTransferable + Error::::ItemsNonTransferable ); }); } @@ -1276,7 +1318,7 @@ fn buy_item_should_work() { }); assert_noop!( buy_item_call.dispatch(RuntimeOrigin::signed(user_2)), - Error::::ItemsNotTransferable + Error::::ItemsNonTransferable ); // unlock the collection @@ -1824,3 +1866,33 @@ fn pallet_level_feature_flags_should_work() { ); }) } + +#[test] +fn group_roles_by_account_should_work() { + new_test_ext().execute_with(|| { + assert_eq!(Nfts::group_roles_by_account(vec![]), vec![]); + + let account_to_role = Nfts::group_roles_by_account(vec![ + (3, 
CollectionRole::Freezer), + (1, CollectionRole::Issuer), + (2, CollectionRole::Admin), + ]); + let expect = vec![ + (1, CollectionRoles(CollectionRole::Issuer.into())), + (2, CollectionRoles(CollectionRole::Admin.into())), + (3, CollectionRoles(CollectionRole::Freezer.into())), + ]; + assert_eq!(account_to_role, expect); + + let account_to_role = Nfts::group_roles_by_account(vec![ + (3, CollectionRole::Freezer), + (2, CollectionRole::Issuer), + (2, CollectionRole::Admin), + ]); + let expect = vec![ + (2, CollectionRoles(CollectionRole::Issuer | CollectionRole::Admin)), + (3, CollectionRoles(CollectionRole::Freezer.into())), + ]; + assert_eq!(account_to_role, expect); + }) +} diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index 6ed57e4da25e5..0122a817229ac 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -56,14 +56,8 @@ impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct CollectionDetails { - /// Can change `owner`, `issuer`, `freezer` and `admin` accounts. + /// Collection's owner. pub(super) owner: AccountId, - /// Can mint tokens. - pub(super) issuer: AccountId, - /// Can thaw tokens, force transfers and burn tokens from any account. - pub(super) admin: AccountId, - /// Can freeze tokens. - pub(super) freezer: AccountId, /// The total balance deposited for the all storage associated with this collection. /// Used by `destroy`. pub(super) total_deposit: DepositBalance, @@ -84,8 +78,8 @@ pub struct DestroyWitness { /// The total number of items in this collection that have outstanding item metadata. #[codec(compact)] pub item_metadatas: u32, - #[codec(compact)] /// The total number of attributes for this collection. 
+ #[codec(compact)] pub attributes: u32, } @@ -301,3 +295,37 @@ impl PalletFeatures { } } impl_codec_bitflags!(PalletFeatures, u64, PalletFeature); + +/// Support for up to 8 different roles for collections. +#[bitflags] +#[repr(u8)] +#[derive(Copy, Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub enum CollectionRole { + /// Can mint items. + Issuer, + /// Can freeze items. + Freezer, + /// Can thaw items, force transfers and burn items from any account. + Admin, +} + +/// A wrapper type that implements `Codec`. +#[derive(Clone, Copy, PartialEq, Eq, Default, RuntimeDebug)] +pub struct CollectionRoles(pub BitFlags); + +impl CollectionRoles { + pub fn none() -> Self { + Self(BitFlags::EMPTY) + } + pub fn has_role(&self, role: CollectionRole) -> bool { + self.0.contains(role) + } + pub fn add_role(&mut self, role: CollectionRole) { + self.0.insert(role); + } + pub fn max_roles() -> u8 { + let all: BitFlags = BitFlags::all(); + all.len() as u8 + } +} +impl_codec_bitflags!(CollectionRoles, u8, CollectionRole); From fef9b48c76ca125bebbf1b52b4d1109358ad0e81 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Sun, 23 Oct 2022 07:28:26 +0200 Subject: [PATCH 016/101] Fix copy --- frame/nfts/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 8b8b21f944f3c..d864374a675a5 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -729,7 +729,7 @@ pub mod pallet { /// Origin must be Signed and the sender should be the Admin of the `collection`. /// /// - `collection`: The collection of the item to be burned. - /// - `item`: The item of the item to be burned. + /// - `item`: The item to be burned. /// - `check_owner`: If `Some` then the operation will fail with `WrongOwner` unless the /// item is owned by this value. /// @@ -768,7 +768,7 @@ pub mod pallet { /// /// Arguments: /// - `collection`: The collection of the item to be transferred. 
- /// - `item`: The item of the item to be transferred. + /// - `item`: The item to be transferred. /// - `dest`: The account to receive ownership of the item. /// /// Emits `Transferred`. @@ -1026,7 +1026,7 @@ pub mod pallet { /// Origin must be Signed and must be the owner of the `item`. /// /// - `collection`: The collection of the item to be approved for delegated transfer. - /// - `item`: The item of the item to be approved for delegated transfer. + /// - `item`: The item to be approved for delegated transfer. /// - `delegate`: The account to delegate permission to transfer the item. /// - `maybe_deadline`: Optional deadline for the approval. Specified by providing the /// number of blocks after which the approval will expire From 799cdf335a4418d87d1c2d913a6fad662e43e936 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Sun, 23 Oct 2022 07:28:55 +0200 Subject: [PATCH 017/101] Remove storage_prefix --- frame/nfts/src/lib.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index d864374a675a5..671adba703eab 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -178,7 +178,6 @@ pub mod pallet { /// Details of a collection. #[pallet::storage] - #[pallet::storage_prefix = "Class"] pub(super) type Collection, I: 'static = ()> = StorageMap< _, Blake2_128Concat, @@ -208,7 +207,6 @@ pub mod pallet { /// The collections owned by any given account; set out this way so that collections owned by /// a single account can be enumerated. #[pallet::storage] - #[pallet::storage_prefix = "ClassAccount"] pub(super) type CollectionAccount, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, @@ -234,7 +232,6 @@ pub mod pallet { /// The items in existence and their ownership details. #[pallet::storage] - #[pallet::storage_prefix = "Asset"] pub(super) type Item, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, @@ -247,7 +244,6 @@ pub mod pallet { /// Metadata of a collection. 
#[pallet::storage] - #[pallet::storage_prefix = "ClassMetadataOf"] pub(super) type CollectionMetadataOf, I: 'static = ()> = StorageMap< _, Blake2_128Concat, @@ -258,7 +254,6 @@ pub mod pallet { /// Metadata of an item. #[pallet::storage] - #[pallet::storage_prefix = "InstanceMetadataOf"] pub(super) type ItemMetadataOf, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, From 83a601d2638ab9ae053bbe56faf517d778d23663 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Sun, 23 Oct 2022 07:30:13 +0200 Subject: [PATCH 018/101] Remove transactional --- frame/nfts/src/lib.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 671adba703eab..73c8e2c391e68 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -48,7 +48,7 @@ use frame_support::{ traits::{ tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency, }, - transactional, BoundedBTreeMap, + BoundedBTreeMap, }; use frame_system::Config as SystemConfig; use sp_runtime::{ @@ -1766,7 +1766,6 @@ pub mod pallet { /// /// Emits `ItemBought` on success. #[pallet::weight(T::WeightInfo::buy_item())] - #[transactional] pub fn buy_item( origin: OriginFor, collection: T::CollectionId, @@ -1785,7 +1784,6 @@ pub mod pallet { /// /// Emits `TipSent` on every tip transfer. #[pallet::weight(T::WeightInfo::pay_tips(tips.len() as u32))] - #[transactional] pub fn pay_tips( origin: OriginFor, tips: BoundedVec, T::MaxTips>, From cbffb27878e9e7558713aa9fa4111cf4c6c6e74e Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Wed, 26 Oct 2022 11:39:38 +0200 Subject: [PATCH 019/101] Initial commit SFT pallet. 
--- frame/erc1155/Cargo.toml | 44 ++++++++ frame/erc1155/README.md | 4 + frame/erc1155/src/benchmarking.rs | 20 ++++ frame/erc1155/src/lib.rs | 164 ++++++++++++++++++++++++++++++ frame/erc1155/src/mock.rs | 150 +++++++++++++++++++++++++++ frame/erc1155/src/tests.rs | 16 +++ 6 files changed, 398 insertions(+) create mode 100644 frame/erc1155/Cargo.toml create mode 100644 frame/erc1155/README.md create mode 100644 frame/erc1155/src/benchmarking.rs create mode 100644 frame/erc1155/src/lib.rs create mode 100644 frame/erc1155/src/mock.rs create mode 100644 frame/erc1155/src/tests.rs diff --git a/frame/erc1155/Cargo.toml b/frame/erc1155/Cargo.toml new file mode 100644 index 0000000000000..e527471c2b0f8 --- /dev/null +++ b/frame/erc1155/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "pallet-erc1155" +version = "4.0.0-dev" +description = "FRAME pallet for semi-fungible tokens." +authors = ["Parity Technologies "] +homepage = "https://substrate.io" +edition = "2021" +license = "Unlicense" +publish = false +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ + "derive", +] } +log = "0.4" +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } +frame-support = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } +frame-system = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } +pallet-assets = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", 
branch = "polkadot-v0.9.30" } +pallet-balances = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } +pallet-uniques = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } +sp-runtime = { version = "6.0.0", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } + +[dev-dependencies] +sp-core = { version = "6.0.0", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } +sp-io = { version = "6.0.0", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } + + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", +] +runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/erc1155/README.md b/frame/erc1155/README.md new file mode 100644 index 0000000000000..5331a7a737563 --- /dev/null +++ b/frame/erc1155/README.md @@ -0,0 +1,4 @@ +### Lock NFT + +Lock an NFT from `pallet-uniques`, automatically mint an fungible +asset from `pallet-assets`. \ No newline at end of file diff --git a/frame/erc1155/src/benchmarking.rs b/frame/erc1155/src/benchmarking.rs new file mode 100644 index 0000000000000..d496a9fc89b1a --- /dev/null +++ b/frame/erc1155/src/benchmarking.rs @@ -0,0 +1,20 @@ +//! Benchmarking setup for pallet-template + +use super::*; + +#[allow(unused)] +use crate::Pallet as Template; +use frame_benchmarking::{benchmarks, whitelisted_caller}; +use frame_system::RawOrigin; + +benchmarks! { + do_something { + let s in 0 .. 
100; + let caller: T::AccountId = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), s) + verify { + assert_eq!(Something::::get(), Some(s)); + } + + impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/erc1155/src/lib.rs b/frame/erc1155/src/lib.rs new file mode 100644 index 0000000000000..3928b5cbe6649 --- /dev/null +++ b/frame/erc1155/src/lib.rs @@ -0,0 +1,164 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +pub use scale_info::Type; + +pub type ItemId = ::ItemId; +pub type CollectionId = ::CollectionId; + +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + use frame_support::{ + dispatch::DispatchResult, + sp_runtime::traits::{AccountIdConversion, StaticLookup}, + traits::Currency, + PalletId, + }; + + pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + pub type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: + frame_system::Config + pallet_uniques::Config + pallet_assets::Config + { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + type Currency: Currency; + + type CollectionId; + + type ItemId; + + type AssetId; + + #[pallet::constant] + type PalletId: Get; + } + + #[pallet::storage] + #[pallet::getter(fn assets_minted)] + // TODO: query amount minted from pallet assets instead of storing it locally. + // Add a public getter function to pallet assets. 
+ pub type AssetsMinted = StorageMap< + _, + Twox64Concat, + ::AssetId, + BalanceOf, + OptionQuery, + >; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + NFTLocked( + ::CollectionId, + ::ItemId, + ), + AssetCreated(::AssetId), + AssetMinted(::AssetId, ::Balance), + } + + #[pallet::error] + pub enum Error { + AssetAlreadyRegistered, + } + + #[pallet::call] + impl Pallet { + #[pallet::weight(10_000 + T::DbWeight::get().writes(2).ref_time())] + pub fn lock_nft_create_asset( + origin: OriginFor, + collection_id: ::CollectionId, + item_id: ::ItemId, + asset_id: ::AssetId, + beneficiary: AccountIdLookupOf, + min_balance: ::Balance, + amount: ::Balance, + ) -> DispatchResult { + let _who = ensure_signed(origin.clone())?; + let admin_account_id = Self::pallet_account_id(); + let admin = T::Lookup::unlookup(admin_account_id.clone()); + + match Self::do_lock_nft(origin.clone(), collection_id, item_id) { + Err(e) => return Err(e), + Ok(()) => match Self::do_create_asset(origin.clone(), asset_id, admin, min_balance) + { + Err(e) => return Err(e), + Ok(()) => match Self::do_mint_asset( + // Minting the asset is only possible from the pallet's origin. + // TODO: should the minting be possible from the owner's account? + frame_system::RawOrigin::Signed(admin_account_id).into(), + asset_id, + beneficiary, + amount, + ) { + Err(e) => return Err(e), + Ok(()) => { + Self::deposit_event(Event::NFTLocked(collection_id, item_id)); + Self::deposit_event(Event::AssetCreated(asset_id)); + Self::deposit_event(Event::AssetMinted(asset_id, amount)); + }, + }, + }, + }; + + Ok(()) + } + + + // TODO: return and burn 100% of the asset, unlock the NFT. 
+ // pub fn burn_asset_unlock_nft() -> DispatchResult {} + } + + impl Pallet { + fn pallet_account_id() -> T::AccountId { + T::PalletId::get().into_account_truncating() + } + + fn do_lock_nft( + who: OriginFor, + collection_id: ::CollectionId, + item_id: ::ItemId, + ) -> DispatchResult { + let pallet_id = T::Lookup::unlookup(Self::pallet_account_id()); + >::transfer(who, collection_id, item_id, pallet_id) + } + + fn do_create_asset( + who: OriginFor, + asset_id: ::AssetId, + admin: AccountIdLookupOf, + min_balance: ::Balance, + ) -> DispatchResult { + >::create(who, asset_id, admin, min_balance) + } + + fn do_mint_asset( + who: OriginFor, + asset_id: ::AssetId, + beneficiary: AccountIdLookupOf, + amount: ::Balance, + ) -> DispatchResult { + >::mint(who, asset_id, beneficiary, amount) + } + } +} diff --git a/frame/erc1155/src/mock.rs b/frame/erc1155/src/mock.rs new file mode 100644 index 0000000000000..d531d2401b0ac --- /dev/null +++ b/frame/erc1155/src/mock.rs @@ -0,0 +1,150 @@ +use crate as pallet_erc1155; +use frame_support::{ + parameter_types, + traits::{AsEnsureOriginWithArg, ConstU128, ConstU16, ConstU32, ConstU64}, + PalletId, +}; +use frame_system as system; +use frame_system::{EnsureRoot, EnsureSigned}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; +type Balance = u128; +type AccountId = u64; + +pub const EXISTENTIAL_DEPOSIT: u128 = 500; +pub const MILLICENTS: Balance = 1_000_000_000; +pub const CENTS: Balance = 1_000 * MILLICENTS; +pub const DOLLARS: Balance = 100 * CENTS; + +// Configure a mock runtime to test the pallet. 
+frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Erc1155: pallet_erc1155, + Assets: pallet_assets, + Uniques: pallet_uniques, + Balances: pallet_balances, + } +); + +impl system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = ConstU16<42>; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type MaxLocks = ConstU32<50>; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU128; + type AccountStore = System; + type WeightInfo = pallet_balances::weights::SubstrateWeight; +} + +parameter_types! 
{ + pub const AssetDeposit: Balance = 100 * DOLLARS; + pub const ApprovalDeposit: Balance = 1 * DOLLARS; + pub const StringLimit: u32 = 50; + pub const MetadataDepositBase: Balance = 10 * DOLLARS; + pub const MetadataDepositPerByte: Balance = 1 * DOLLARS; +} + +impl pallet_assets::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = u128; + type AssetId = u32; + type Currency = Balances; + type ForceOrigin = EnsureRoot; + type AssetDeposit = AssetDeposit; + type AssetAccountDeposit = ConstU128; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = StringLimit; + type Freezer = (); + type Extra = (); + type WeightInfo = pallet_assets::weights::SubstrateWeight; +} + +parameter_types! { + pub const CollectionDeposit: Balance = 100 * DOLLARS; + pub const ItemDeposit: Balance = 1 * DOLLARS; + pub const KeyLimit: u32 = 32; + pub const ValueLimit: u32 = 256; +} + +impl pallet_uniques::Config for Test { + type RuntimeEvent = RuntimeEvent; + type CollectionId = u32; + type ItemId = u32; + type Currency = Balances; + type ForceOrigin = frame_system::EnsureRoot; + type CollectionDeposit = CollectionDeposit; + type ItemDeposit = ItemDeposit; + type MetadataDepositBase = MetadataDepositBase; + type AttributeDepositBase = MetadataDepositBase; + type DepositPerByte = MetadataDepositPerByte; + type StringLimit = StringLimit; + type KeyLimit = KeyLimit; + type ValueLimit = ValueLimit; + type WeightInfo = pallet_uniques::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type Helper = (); + type CreateOrigin = AsEnsureOriginWithArg>; + type Locker = (); +} + +parameter_types! 
{ + pub const ERC1155PalletId: PalletId = PalletId(*b"erc1155 "); +} + +impl pallet_erc1155::Config for Test { + type RuntimeEvent = RuntimeEvent; + type PalletId = ERC1155PalletId; + type Currency = Balances; + type CollectionId = Uniques; + type ItemId = Uniques; + type AssetId = Assets; +} + +// Build genesis storage according to the mock runtime. +pub fn new_test_ext() -> sp_io::TestExternalities { + system::GenesisConfig::default().build_storage::().unwrap().into() +} diff --git a/frame/erc1155/src/tests.rs b/frame/erc1155/src/tests.rs new file mode 100644 index 0000000000000..756531d9dc3d6 --- /dev/null +++ b/frame/erc1155/src/tests.rs @@ -0,0 +1,16 @@ +use crate::{mock::*, Error}; +use frame_support::{assert_noop, assert_ok, traits::Currency}; + +#[test] +fn address_is_set() { + new_test_ext().execute_with(|| { + // Dispatch a signed extrinsic. + assert_eq!(Erc1155::pallet_address(), None); + assert_ok!(Erc1155::set_pallet_address(RuntimeOrigin::signed(1))); + assert_eq!(Erc1155::pallet_address(), Some(1u64)); + // assert_eq!( + // Erc1155::issuance(), + // Some(>::total_issuance()) + // ) + }); +} From e1b24d79adc82c339b3b83ef8a22a8cdd9390109 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Fri, 28 Oct 2022 16:45:18 +0200 Subject: [PATCH 020/101] Update comment --- frame/nfts/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 73c8e2c391e68..00fb6dc238f86 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -277,7 +277,7 @@ pub mod pallet { OptionQuery, >; - /// Price of an asset instance. + /// A price of an item. 
#[pallet::storage] pub(super) type ItemPriceOf, I: 'static = ()> = StorageDoubleMap< _, From afd4c18a4676c5889a03beb9591384851f695498 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Wed, 16 Nov 2022 11:12:50 +0200 Subject: [PATCH 021/101] [Uniques V2] Minting options (#12483) * Basics * WIP: change the data format * Refactor * Remove redundant new() method * Rename settings * Enable tests * Chore * Change params order * Delete the config on collection removal * Chore * Remove redundant system features * Rename force_item_status to force_collection_status * Update node runtime * Chore * Remove thaw_collection * Chore * Connect collection.is_frozen to config * Allow to lock the collection in a new way * Move free_holding into settings * Connect collection's metadata locker to feature flags * DRY * Chore * Connect pallet level feature flags * Prepare tests for the new changes * Implement Item settings * Allow to lock the metadata or attributes of an item * Common -> Settings * Extract settings related code to a separate file * Move feature flag checks inside the do_* methods * Split settings.rs into parts * Extract repeated code into macro * Extract macros into their own file * Chore * Fix traits * Fix traits * Test SystemFeatures * Fix benchmarks * Add missing benchmark * Fix node/runtime/lib.rs * ".git/.scripts/bench-bot.sh" pallet dev pallet_nfts * Keep item's config on burn if it's not empty * Fix the merge artifacts * Fmt * Add SystemFeature::NoSwaps check * Rename SystemFeatures to PalletFeatures * Rename errors * Add docs * Change error message * Change the format of CollectionConfig to store more data * Move max supply to the CollectionConfig and allow to change it * Remove ItemConfig from the mint() function and use the one set in mint settings * Add different mint options * Allow to change the mint settings * Add a force_mint() method * Check mint params * Some optimisations * Cover with tests * Remove merge 
artifacts * Chore * Use the new has_role() method * Rework item deposits * More tests * Refactoring * Address comments * Refactor lock_collection() * Update frame/nfts/src/types.rs Co-authored-by: Squirrel * Update frame/nfts/src/types.rs Co-authored-by: Squirrel * Update frame/nfts/src/lib.rs Co-authored-by: Squirrel * Update frame/nfts/src/lib.rs Co-authored-by: Squirrel * Private => Issuer * Add more tests * Fix benchmarks * Add benchmarks for new methods * [Uniques v2] Refactoring (#12570) * Move do_set_price() and do_buy_item() to buy_sell.rs * Move approvals to feature file * Move metadata to feature files * Move the rest of methods to feature files * Remove artifacts * Split force_collection_status into 2 methods * Fix benchmarks * Fix benchmarks * Update deps Co-authored-by: command-bot <> Co-authored-by: Squirrel --- frame/nfts/Cargo.toml | 10 +- frame/nfts/src/benchmarking.rs | 96 ++- frame/nfts/src/common_functions.rs | 42 + frame/nfts/src/features/approvals.rs | 132 ++++ frame/nfts/src/features/attributes.rs | 123 +++ frame/nfts/src/features/buy_sell.rs | 90 ++- .../src/features/create_delete_collection.rs | 109 +++ frame/nfts/src/features/create_delete_item.rs | 123 +++ frame/nfts/src/features/lock.rs | 16 +- frame/nfts/src/features/metadata.rs | 173 +++++ frame/nfts/src/features/mod.rs | 6 + frame/nfts/src/features/roles.rs | 30 + frame/nfts/src/features/settings.rs | 70 +- frame/nfts/src/features/transfer.rs | 165 ++++ frame/nfts/src/functions.rs | 355 --------- frame/nfts/src/impl_nonfungibles.rs | 20 +- frame/nfts/src/lib.rs | 733 ++++++------------ frame/nfts/src/tests.rs | 514 ++++++------ frame/nfts/src/types.rs | 163 +++- frame/nfts/src/weights.rs | 61 +- .../src/traits/tokens/nonfungible_v2.rs | 22 +- .../src/traits/tokens/nonfungibles_v2.rs | 1 + 22 files changed, 1856 insertions(+), 1198 deletions(-) create mode 100644 frame/nfts/src/common_functions.rs create mode 100644 frame/nfts/src/features/approvals.rs create mode 100644 
frame/nfts/src/features/attributes.rs create mode 100644 frame/nfts/src/features/create_delete_collection.rs create mode 100644 frame/nfts/src/features/create_delete_item.rs create mode 100644 frame/nfts/src/features/metadata.rs create mode 100644 frame/nfts/src/features/transfer.rs delete mode 100644 frame/nfts/src/functions.rs diff --git a/frame/nfts/Cargo.toml b/frame/nfts/Cargo.toml index f0b68ea702e3a..109dffdd10f50 100644 --- a/frame/nfts/Cargo.toml +++ b/frame/nfts/Cargo.toml @@ -20,14 +20,14 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "6.0.0", path = "../../primitives/core" } -sp-io = { version = "6.0.0", path = "../../primitives/io" } -sp-std = { version = "4.0.0", path = "../../primitives/std" } +sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-std = { version = "5.0.0", path = "../../primitives/std" } [features] default = ["std"] diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index a5a264c40a715..61407abd9f985 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -20,6 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use 
super::*; +use enumflags2::{BitFlag, BitFlags}; use frame_benchmarking::{ account, benchmarks_instance_pallet, whitelist_account, whitelisted_caller, }; @@ -46,7 +47,7 @@ fn create_collection, I: 'static>( assert_ok!(Nfts::::force_create( SystemOrigin::Root.into(), caller_lookup.clone(), - CollectionConfig::all_settings_enabled() + default_collection_config::() )); (collection, caller, caller_lookup) } @@ -78,8 +79,7 @@ fn mint_item, I: 'static>( SystemOrigin::Signed(caller.clone()).into(), T::Helper::collection(0), item, - caller_lookup.clone(), - ItemConfig::all_settings_enabled(), + None, )); (item, caller, caller_lookup) } @@ -128,6 +128,24 @@ fn assert_last_event, I: 'static>(generic_event: >:: assert_eq!(event, &system_event); } +fn make_collection_config, I: 'static>( + disable_settings: BitFlags, +) -> CollectionConfigFor { + CollectionConfig { + settings: CollectionSettings::from_disabled(disable_settings), + max_supply: None, + mint_settings: MintSettings::default(), + } +} + +fn default_collection_config, I: 'static>() -> CollectionConfigFor { + make_collection_config::(CollectionSetting::empty()) +} + +fn default_item_config() -> ItemConfig { + ItemConfig { settings: ItemSettings::all_enabled() } +} + benchmarks_instance_pallet! { create { let collection = T::Helper::collection(0); @@ -136,7 +154,7 @@ benchmarks_instance_pallet! { whitelist_account!(caller); let admin = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - let call = Call::::create { admin, config: CollectionConfig::all_settings_enabled() }; + let call = Call::::create { admin, config: default_collection_config::() }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_last_event::(Event::Created { collection: T::Helper::collection(0), creator: caller.clone(), owner: caller }.into()); @@ -145,7 +163,7 @@ benchmarks_instance_pallet! 
{ force_create { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - }: _(SystemOrigin::Root, caller_lookup, CollectionConfig::all_settings_enabled()) + }: _(SystemOrigin::Root, caller_lookup, default_collection_config::()) verify { assert_last_event::(Event::ForceCreated { collection: T::Helper::collection(0), owner: caller }.into()); } @@ -169,7 +187,15 @@ benchmarks_instance_pallet! { mint { let (collection, caller, caller_lookup) = create_collection::(); let item = T::Helper::item(0); - }: _(SystemOrigin::Signed(caller.clone()), collection, item, caller_lookup, ItemConfig::all_settings_enabled()) + }: _(SystemOrigin::Signed(caller.clone()), collection, item, None) + verify { + assert_last_event::(Event::Issued { collection, item, owner: caller }.into()); + } + + force_mint { + let (collection, caller, caller_lookup) = create_collection::(); + let item = T::Helper::item(0); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, caller_lookup, default_item_config()) verify { assert_last_event::(Event::Issued { collection, item, owner: caller }.into()); } @@ -188,6 +214,7 @@ benchmarks_instance_pallet! { let target: T::AccountId = account("target", 0, SEED); let target_lookup = T::Lookup::unlookup(target.clone()); + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); }: _(SystemOrigin::Signed(caller.clone()), collection, item, target_lookup) verify { assert_last_event::(Event::Transferred { collection, item, from: caller, to: target }.into()); @@ -197,14 +224,10 @@ benchmarks_instance_pallet! { let i in 0 .. 
5_000; let (collection, caller, caller_lookup) = create_collection::(); let items = (0..i).map(|x| mint_item::(x as u16).0).collect::>(); - Nfts::::force_collection_status( + Nfts::::force_collection_config( SystemOrigin::Root.into(), collection, - caller_lookup.clone(), - caller_lookup.clone(), - caller_lookup.clone(), - caller_lookup, - CollectionConfig(CollectionSetting::DepositRequired.into()), + make_collection_config::(CollectionSetting::DepositRequired.into()), )?; }: _(SystemOrigin::Signed(caller.clone()), collection, items.clone()) verify { @@ -234,12 +257,13 @@ benchmarks_instance_pallet! { lock_collection { let (collection, caller, caller_lookup) = create_collection::(); - let lock_config = CollectionConfig( + let lock_settings = CollectionSettings::from_disabled( CollectionSetting::TransferableItems | CollectionSetting::UnlockedMetadata | - CollectionSetting::UnlockedAttributes, + CollectionSetting::UnlockedAttributes | + CollectionSetting::UnlockedMaxSupply, ); - }: _(SystemOrigin::Signed(caller.clone()), collection, lock_config) + }: _(SystemOrigin::Signed(caller.clone()), collection, lock_settings) verify { assert_last_event::(Event::CollectionLocked { collection }.into()); } @@ -271,20 +295,31 @@ benchmarks_instance_pallet! { }.into()); } - force_collection_status { + force_collection_owner { + let (collection, _, _) = create_collection::(); + let origin = T::ForceOrigin::successful_origin(); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); + let call = Call::::force_collection_owner { + collection, + owner: target_lookup, + }; + }: { call.dispatch_bypass_filter(origin)? 
} + verify { + assert_last_event::(Event::OwnerChanged { collection, new_owner: target }.into()); + } + + force_collection_config { let (collection, caller, caller_lookup) = create_collection::(); let origin = T::ForceOrigin::successful_origin(); - let call = Call::::force_collection_status { + let call = Call::::force_collection_config { collection, - owner: caller_lookup.clone(), - issuer: caller_lookup.clone(), - admin: caller_lookup.clone(), - freezer: caller_lookup, - config: CollectionConfig(CollectionSetting::DepositRequired.into()), + config: make_collection_config::(CollectionSetting::DepositRequired.into()), }; }: { call.dispatch_bypass_filter(origin)? } verify { - assert_last_event::(Event::CollectionStatusChanged { collection }.into()); + assert_last_event::(Event::CollectionConfigChanged { collection }.into()); } lock_item_properties { @@ -414,6 +449,20 @@ benchmarks_instance_pallet! { }.into()); } + update_mint_settings { + let (collection, caller, _) = create_collection::(); + let mint_settings = MintSettings { + mint_type: MintType::HolderOf(T::Helper::collection(0)), + start_block: Some(One::one()), + end_block: Some(One::one()), + price: Some(ItemPrice::::from(1u32)), + default_item_settings: ItemSettings::all_enabled(), + }; + }: _(SystemOrigin::Signed(caller.clone()), collection, mint_settings) + verify { + assert_last_event::(Event::CollectionMintSettingsUpdated { collection }.into()); + } + set_price { let (collection, caller, _) = create_collection::(); let (item, ..) = mint_item::(0); @@ -528,6 +577,7 @@ benchmarks_instance_pallet! 
{ let duration = T::MaxDeadlineDuration::get(); let target: T::AccountId = account("target", 0, SEED); let target_lookup = T::Lookup::unlookup(target.clone()); + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); let origin = SystemOrigin::Signed(caller.clone()); frame_system::Pallet::::set_block_number(One::one()); Nfts::::transfer(origin.clone().into(), collection, item2, target_lookup)?; diff --git a/frame/nfts/src/common_functions.rs b/frame/nfts/src/common_functions.rs new file mode 100644 index 0000000000000..b3cac7f69ec0e --- /dev/null +++ b/frame/nfts/src/common_functions.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Various pieces of common functionality. + +use super::*; + +impl, I: 'static> Pallet { + /// Get the owner of the item, if the item exists. + pub fn owner(collection: T::CollectionId, item: T::ItemId) -> Option { + Item::::get(collection, item).map(|i| i.owner) + } + + /// Get the owner of the item, if the item exists. 
+ pub fn collection_owner(collection: T::CollectionId) -> Option { + Collection::::get(collection).map(|i| i.owner) + } + + #[cfg(any(test, feature = "runtime-benchmarks"))] + pub fn set_next_id(id: T::CollectionId) { + NextCollectionId::::set(Some(id)); + } + + #[cfg(test)] + pub fn get_next_id() -> T::CollectionId { + NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()) + } +} diff --git a/frame/nfts/src/features/approvals.rs b/frame/nfts/src/features/approvals.rs new file mode 100644 index 0000000000000..0cbceb9113d0c --- /dev/null +++ b/frame/nfts/src/features/approvals.rs @@ -0,0 +1,132 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub(crate) fn do_approve_transfer( + maybe_check_origin: Option, + collection: T::CollectionId, + item: T::ItemId, + delegate: T::AccountId, + maybe_deadline: Option<::BlockNumber>, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Approvals), + Error::::MethodDisabled + ); + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config.is_setting_enabled(CollectionSetting::TransferableItems), + Error::::ItemsNonTransferable + ); + + if let Some(check_origin) = maybe_check_origin { + let is_admin = Self::has_role(&collection, &check_origin, CollectionRole::Admin); + let permitted = is_admin || check_origin == details.owner; + ensure!(permitted, Error::::NoPermission); + } + + let now = frame_system::Pallet::::block_number(); + let deadline = maybe_deadline.map(|d| d.saturating_add(now)); + + details + .approvals + .try_insert(delegate.clone(), deadline) + .map_err(|_| Error::::ReachedApprovalLimit)?; + Item::::insert(&collection, &item, &details); + + Self::deposit_event(Event::ApprovedTransfer { + collection, + item, + owner: details.owner, + delegate, + deadline, + }); + + Ok(()) + } + + pub(crate) fn do_cancel_approval( + maybe_check_origin: Option, + collection: T::CollectionId, + item: T::ItemId, + delegate: T::AccountId, + ) -> DispatchResult { + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + + let maybe_deadline = details.approvals.get(&delegate).ok_or(Error::::NotDelegate)?; + + let is_past_deadline = if let Some(deadline) = maybe_deadline { + let now = frame_system::Pallet::::block_number(); + now > *deadline + } else { + false + }; + + if !is_past_deadline { + if let Some(check_origin) = maybe_check_origin { + let is_admin = Self::has_role(&collection, &check_origin, 
CollectionRole::Admin); + let permitted = is_admin || check_origin == details.owner; + ensure!(permitted, Error::::NoPermission); + } + } + + details.approvals.remove(&delegate); + Item::::insert(&collection, &item, &details); + + Self::deposit_event(Event::ApprovalCancelled { + collection, + item, + owner: details.owner, + delegate, + }); + + Ok(()) + } + + pub(crate) fn do_clear_all_transfer_approvals( + maybe_check_origin: Option, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; + + if let Some(check_origin) = maybe_check_origin { + let is_admin = Self::has_role(&collection, &check_origin, CollectionRole::Admin); + let permitted = is_admin || check_origin == details.owner; + ensure!(permitted, Error::::NoPermission); + } + + details.approvals.clear(); + Item::::insert(&collection, &item, &details); + + Self::deposit_event(Event::AllApprovalsCancelled { + collection, + item, + owner: details.owner, + }); + + Ok(()) + } +} diff --git a/frame/nfts/src/features/attributes.rs b/frame/nfts/src/features/attributes.rs new file mode 100644 index 0000000000000..85c1e0b302d12 --- /dev/null +++ b/frame/nfts/src/features/attributes.rs @@ -0,0 +1,123 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub(crate) fn do_set_attribute( + maybe_check_owner: Option, + collection: T::CollectionId, + maybe_item: Option, + key: BoundedVec, + value: BoundedVec, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Attributes), + Error::::MethodDisabled + ); + + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + } + + let collection_config = Self::get_collection_config(&collection)?; + match maybe_item { + None => { + ensure!( + collection_config.is_setting_enabled(CollectionSetting::UnlockedAttributes), + Error::::LockedCollectionAttributes + ) + }, + Some(item) => { + let maybe_is_locked = Self::get_item_config(&collection, &item) + .map(|c| c.has_disabled_setting(ItemSetting::UnlockedAttributes))?; + ensure!(!maybe_is_locked, Error::::LockedItemAttributes); + }, + }; + + let attribute = Attribute::::get((collection, maybe_item, &key)); + if attribute.is_none() { + collection_details.attributes.saturating_inc(); + } + let old_deposit = attribute.map_or(Zero::zero(), |m| m.1); + collection_details.total_deposit.saturating_reduce(old_deposit); + let mut deposit = Zero::zero(); + if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) && + maybe_check_owner.is_some() + { + deposit = T::DepositPerByte::get() + .saturating_mul(((key.len() + value.len()) as u32).into()) + .saturating_add(T::AttributeDepositBase::get()); + } + collection_details.total_deposit.saturating_accrue(deposit); + if deposit > old_deposit { + T::Currency::reserve(&collection_details.owner, deposit - old_deposit)?; + } else if deposit < old_deposit { + T::Currency::unreserve(&collection_details.owner, old_deposit - deposit); + } + + Attribute::::insert((&collection, maybe_item, &key), (&value, 
deposit)); + Collection::::insert(collection, &collection_details); + Self::deposit_event(Event::AttributeSet { collection, maybe_item, key, value }); + Ok(()) + } + + pub(crate) fn do_clear_attribute( + maybe_check_owner: Option, + collection: T::CollectionId, + maybe_item: Option, + key: BoundedVec, + ) -> DispatchResult { + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + } + + if maybe_check_owner.is_some() { + match maybe_item { + None => { + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config.is_setting_enabled(CollectionSetting::UnlockedAttributes), + Error::::LockedCollectionAttributes + ) + }, + Some(item) => { + // NOTE: if the item was previously burned, the ItemConfigOf record might + // not exist. In that case, we allow to clear the attribute. + let maybe_is_locked = Self::get_item_config(&collection, &item) + .map_or(false, |c| c.has_disabled_setting(ItemSetting::UnlockedAttributes)); + ensure!(!maybe_is_locked, Error::::LockedItemAttributes); + }, + }; + } + + if let Some((_, deposit)) = Attribute::::take((collection, maybe_item, &key)) { + collection_details.attributes.saturating_dec(); + collection_details.total_deposit.saturating_reduce(deposit); + T::Currency::unreserve(&collection_details.owner, deposit); + Collection::::insert(collection, &collection_details); + Self::deposit_event(Event::AttributeCleared { collection, maybe_item, key }); + } + Ok(()) + } +} diff --git a/frame/nfts/src/features/buy_sell.rs b/frame/nfts/src/features/buy_sell.rs index c1e29057af9c9..8ba5171f8d822 100644 --- a/frame/nfts/src/features/buy_sell.rs +++ b/frame/nfts/src/features/buy_sell.rs @@ -18,7 +18,7 @@ use crate::*; use frame_support::{ pallet_prelude::*, - traits::{Currency, ExistenceRequirement::KeepAlive}, + traits::{Currency, 
ExistenceRequirement, ExistenceRequirement::KeepAlive}, }; impl, I: 'static> Pallet { @@ -39,4 +39,92 @@ impl, I: 'static> Pallet { } Ok(()) } + + pub(crate) fn do_set_price( + collection: T::CollectionId, + item: T::ItemId, + sender: T::AccountId, + price: Option>, + whitelisted_buyer: Option, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Trading), + Error::::MethodDisabled + ); + + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + ensure!(details.owner == sender, Error::::NoPermission); + + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config.is_setting_enabled(CollectionSetting::TransferableItems), + Error::::ItemsNonTransferable + ); + + let item_config = Self::get_item_config(&collection, &item)?; + ensure!( + item_config.is_setting_enabled(ItemSetting::Transferable), + Error::::ItemLocked + ); + + if let Some(ref price) = price { + ItemPriceOf::::insert(&collection, &item, (price, whitelisted_buyer.clone())); + Self::deposit_event(Event::ItemPriceSet { + collection, + item, + price: *price, + whitelisted_buyer, + }); + } else { + ItemPriceOf::::remove(&collection, &item); + Self::deposit_event(Event::ItemPriceRemoved { collection, item }); + } + + Ok(()) + } + + pub(crate) fn do_buy_item( + collection: T::CollectionId, + item: T::ItemId, + buyer: T::AccountId, + bid_price: ItemPrice, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Trading), + Error::::MethodDisabled + ); + + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + ensure!(details.owner != buyer, Error::::NoPermission); + + let price_info = + ItemPriceOf::::get(&collection, &item).ok_or(Error::::NotForSale)?; + + ensure!(bid_price >= price_info.0, Error::::BidTooLow); + + if let Some(only_buyer) = price_info.1 { + ensure!(only_buyer == buyer, Error::::NoPermission); + } + + T::Currency::transfer( + &buyer, + &details.owner, + 
price_info.0, + ExistenceRequirement::KeepAlive, + )?; + + let old_owner = details.owner.clone(); + + Self::do_transfer(collection, item, buyer.clone(), |_, _| Ok(()))?; + + Self::deposit_event(Event::ItemBought { + collection, + item, + price: price_info.0, + seller: old_owner, + buyer, + }); + + Ok(()) + } } diff --git a/frame/nfts/src/features/create_delete_collection.rs b/frame/nfts/src/features/create_delete_collection.rs new file mode 100644 index 0000000000000..b9530e88b18cd --- /dev/null +++ b/frame/nfts/src/features/create_delete_collection.rs @@ -0,0 +1,109 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub fn do_create_collection( + collection: T::CollectionId, + owner: T::AccountId, + admin: T::AccountId, + config: CollectionConfigFor, + deposit: DepositBalanceOf, + event: Event, + ) -> DispatchResult { + ensure!(!Collection::::contains_key(collection), Error::::CollectionIdInUse); + + T::Currency::reserve(&owner, deposit)?; + + Collection::::insert( + collection, + CollectionDetails { + owner: owner.clone(), + total_deposit: deposit, + items: 0, + item_metadatas: 0, + attributes: 0, + }, + ); + CollectionRoleOf::::insert( + collection, + admin, + CollectionRoles( + CollectionRole::Admin | CollectionRole::Freezer | CollectionRole::Issuer, + ), + ); + + let next_id = collection.increment(); + + CollectionConfigOf::::insert(&collection, config); + CollectionAccount::::insert(&owner, &collection, ()); + NextCollectionId::::set(Some(next_id)); + + Self::deposit_event(Event::NextCollectionIdIncremented { next_id }); + Self::deposit_event(event); + Ok(()) + } + + pub fn do_destroy_collection( + collection: T::CollectionId, + witness: DestroyWitness, + maybe_check_owner: Option, + ) -> Result { + Collection::::try_mutate_exists(collection, |maybe_details| { + let collection_details = + maybe_details.take().ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = maybe_check_owner { + ensure!(collection_details.owner == check_owner, Error::::NoPermission); + } + ensure!(collection_details.items == witness.items, Error::::BadWitness); + ensure!( + collection_details.item_metadatas == witness.item_metadatas, + Error::::BadWitness + ); + ensure!(collection_details.attributes == witness.attributes, Error::::BadWitness); + + for (item, details) in Item::::drain_prefix(&collection) { + Account::::remove((&details.owner, &collection, &item)); + T::Currency::unreserve(&details.deposit.account, details.deposit.amount); + } + #[allow(deprecated)] + 
ItemMetadataOf::::remove_prefix(&collection, None); + #[allow(deprecated)] + ItemPriceOf::::remove_prefix(&collection, None); + #[allow(deprecated)] + PendingSwapOf::::remove_prefix(&collection, None); + CollectionMetadataOf::::remove(&collection); + Self::clear_roles(&collection)?; + #[allow(deprecated)] + Attribute::::remove_prefix((&collection,), None); + CollectionAccount::::remove(&collection_details.owner, &collection); + T::Currency::unreserve(&collection_details.owner, collection_details.total_deposit); + CollectionConfigOf::::remove(&collection); + let _ = ItemConfigOf::::clear_prefix(&collection, witness.items, None); + + Self::deposit_event(Event::Destroyed { collection }); + + Ok(DestroyWitness { + items: collection_details.items, + item_metadatas: collection_details.item_metadatas, + attributes: collection_details.attributes, + }) + }) + } +} diff --git a/frame/nfts/src/features/create_delete_item.rs b/frame/nfts/src/features/create_delete_item.rs new file mode 100644 index 0000000000000..10670f4b10c1c --- /dev/null +++ b/frame/nfts/src/features/create_delete_item.rs @@ -0,0 +1,123 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub fn do_mint( + collection: T::CollectionId, + item: T::ItemId, + owner: T::AccountId, + item_config: ItemConfig, + deposit_collection_owner: bool, + with_details_and_config: impl FnOnce( + &CollectionDetailsFor, + &CollectionConfigFor, + ) -> DispatchResult, + ) -> DispatchResult { + ensure!(!Item::::contains_key(collection, item), Error::::AlreadyExists); + + Collection::::try_mutate( + &collection, + |maybe_collection_details| -> DispatchResult { + let collection_details = + maybe_collection_details.as_mut().ok_or(Error::::UnknownCollection)?; + + let collection_config = Self::get_collection_config(&collection)?; + with_details_and_config(collection_details, &collection_config)?; + + if let Some(max_supply) = collection_config.max_supply { + ensure!(collection_details.items < max_supply, Error::::MaxSupplyReached); + } + + let items = + collection_details.items.checked_add(1).ok_or(ArithmeticError::Overflow)?; + collection_details.items = items; + + let collection_config = Self::get_collection_config(&collection)?; + let deposit_amount = match collection_config + .is_setting_enabled(CollectionSetting::DepositRequired) + { + true => T::ItemDeposit::get(), + false => Zero::zero(), + }; + let deposit_account = match deposit_collection_owner { + true => collection_details.owner.clone(), + false => owner.clone(), + }; + + let owner = owner.clone(); + Account::::insert((&owner, &collection, &item), ()); + + if let Ok(existing_config) = ItemConfigOf::::try_get(&collection, &item) { + ensure!(existing_config == item_config, Error::::InconsistentItemConfig); + } else { + ItemConfigOf::::insert(&collection, &item, item_config); + } + + T::Currency::reserve(&deposit_account, deposit_amount)?; + + let deposit = ItemDeposit { account: deposit_account, amount: deposit_amount }; + let details = + ItemDetails { owner, approvals: ApprovalsOf::::default(), deposit }; + 
Item::::insert(&collection, &item, details); + Ok(()) + }, + )?; + + Self::deposit_event(Event::Issued { collection, item, owner }); + Ok(()) + } + + pub fn do_burn( + collection: T::CollectionId, + item: T::ItemId, + with_details: impl FnOnce(&ItemDetailsFor) -> DispatchResult, + ) -> DispatchResult { + let owner = Collection::::try_mutate( + &collection, + |maybe_collection_details| -> Result { + let collection_details = + maybe_collection_details.as_mut().ok_or(Error::::UnknownCollection)?; + let details = Item::::get(&collection, &item) + .ok_or(Error::::UnknownCollection)?; + with_details(&details)?; + + // Return the deposit. + T::Currency::unreserve(&details.deposit.account, details.deposit.amount); + collection_details.items.saturating_dec(); + Ok(details.owner) + }, + )?; + + Item::::remove(&collection, &item); + Account::::remove((&owner, &collection, &item)); + ItemPriceOf::::remove(&collection, &item); + PendingSwapOf::::remove(&collection, &item); + + // NOTE: if item's settings are not empty (e.g. 
item's metadata is locked) + // then we keep the record and don't remove it + let config = Self::get_item_config(&collection, &item)?; + if !config.has_disabled_settings() { + ItemConfigOf::::remove(&collection, &item); + } + + Self::deposit_event(Event::Burned { collection, item, owner }); + Ok(()) + } +} diff --git a/frame/nfts/src/features/lock.rs b/frame/nfts/src/features/lock.rs index 50420d8e3de87..e96a30dfd2c7c 100644 --- a/frame/nfts/src/features/lock.rs +++ b/frame/nfts/src/features/lock.rs @@ -22,23 +22,21 @@ impl, I: 'static> Pallet { pub(crate) fn do_lock_collection( origin: T::AccountId, collection: T::CollectionId, - lock_config: CollectionConfig, + lock_settings: CollectionSettings, ) -> DispatchResult { ensure!( Self::has_role(&collection, &origin, CollectionRole::Freezer), Error::::NoPermission ); + ensure!( + !lock_settings.is_disabled(CollectionSetting::DepositRequired), + Error::::WrongSetting + ); CollectionConfigOf::::try_mutate(collection, |maybe_config| { let config = maybe_config.as_mut().ok_or(Error::::NoConfig)?; - if lock_config.has_disabled_setting(CollectionSetting::TransferableItems) { - config.disable_setting(CollectionSetting::TransferableItems); - } - if lock_config.has_disabled_setting(CollectionSetting::UnlockedMetadata) { - config.disable_setting(CollectionSetting::UnlockedMetadata); - } - if lock_config.has_disabled_setting(CollectionSetting::UnlockedAttributes) { - config.disable_setting(CollectionSetting::UnlockedAttributes); + for setting in lock_settings.get_disabled() { + config.disable_setting(setting); } Self::deposit_event(Event::::CollectionLocked { collection }); diff --git a/frame/nfts/src/features/metadata.rs b/frame/nfts/src/features/metadata.rs new file mode 100644 index 0000000000000..0b0a337197d9b --- /dev/null +++ b/frame/nfts/src/features/metadata.rs @@ -0,0 +1,173 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub(crate) fn do_set_item_metadata( + maybe_check_owner: Option, + collection: T::CollectionId, + item: T::ItemId, + data: BoundedVec, + ) -> DispatchResult { + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + + let item_config = Self::get_item_config(&collection, &item)?; + ensure!( + maybe_check_owner.is_none() || + item_config.is_setting_enabled(ItemSetting::UnlockedMetadata), + Error::::LockedItemMetadata + ); + + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + } + + let collection_config = Self::get_collection_config(&collection)?; + + ItemMetadataOf::::try_mutate_exists(collection, item, |metadata| { + if metadata.is_none() { + collection_details.item_metadatas.saturating_inc(); + } + let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); + collection_details.total_deposit.saturating_reduce(old_deposit); + let mut deposit = Zero::zero(); + if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) && + maybe_check_owner.is_some() + { + deposit = T::DepositPerByte::get() + .saturating_mul(((data.len()) as u32).into()) + .saturating_add(T::MetadataDepositBase::get()); + } + if deposit > old_deposit { + 
T::Currency::reserve(&collection_details.owner, deposit - old_deposit)?; + } else if deposit < old_deposit { + T::Currency::unreserve(&collection_details.owner, old_deposit - deposit); + } + collection_details.total_deposit.saturating_accrue(deposit); + + *metadata = Some(ItemMetadata { deposit, data: data.clone() }); + + Collection::::insert(&collection, &collection_details); + Self::deposit_event(Event::MetadataSet { collection, item, data }); + Ok(()) + }) + } + + pub(crate) fn do_clear_item_metadata( + maybe_check_owner: Option, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + } + + // NOTE: if the item was previously burned, the ItemConfigOf record might not exist + let is_locked = Self::get_item_config(&collection, &item) + .map_or(false, |c| c.has_disabled_setting(ItemSetting::UnlockedMetadata)); + + ensure!(maybe_check_owner.is_none() || !is_locked, Error::::LockedItemMetadata); + + ItemMetadataOf::::try_mutate_exists(collection, item, |metadata| { + if metadata.is_some() { + collection_details.item_metadatas.saturating_dec(); + } + let deposit = metadata.take().ok_or(Error::::UnknownItem)?.deposit; + T::Currency::unreserve(&collection_details.owner, deposit); + collection_details.total_deposit.saturating_reduce(deposit); + + Collection::::insert(&collection, &collection_details); + Self::deposit_event(Event::MetadataCleared { collection, item }); + Ok(()) + }) + } + + pub(crate) fn do_set_collection_metadata( + maybe_check_owner: Option, + collection: T::CollectionId, + data: BoundedVec, + ) -> DispatchResult { + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + maybe_check_owner.is_none() || + collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), + 
Error::::LockedCollectionMetadata + ); + + let mut details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &details.owner, Error::::NoPermission); + } + + CollectionMetadataOf::::try_mutate_exists(collection, |metadata| { + let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); + details.total_deposit.saturating_reduce(old_deposit); + let mut deposit = Zero::zero(); + if maybe_check_owner.is_some() && + collection_config.is_setting_enabled(CollectionSetting::DepositRequired) + { + deposit = T::DepositPerByte::get() + .saturating_mul(((data.len()) as u32).into()) + .saturating_add(T::MetadataDepositBase::get()); + } + if deposit > old_deposit { + T::Currency::reserve(&details.owner, deposit - old_deposit)?; + } else if deposit < old_deposit { + T::Currency::unreserve(&details.owner, old_deposit - deposit); + } + details.total_deposit.saturating_accrue(deposit); + + Collection::::insert(&collection, details); + + *metadata = Some(CollectionMetadata { deposit, data: data.clone() }); + + Self::deposit_event(Event::CollectionMetadataSet { collection, data }); + Ok(()) + }) + } + + pub(crate) fn do_clear_collection_metadata( + maybe_check_owner: Option, + collection: T::CollectionId, + ) -> DispatchResult { + let details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &details.owner, Error::::NoPermission); + } + + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + maybe_check_owner.is_none() || + collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), + Error::::LockedCollectionMetadata + ); + + CollectionMetadataOf::::try_mutate_exists(collection, |metadata| { + let deposit = metadata.take().ok_or(Error::::UnknownCollection)?.deposit; + T::Currency::unreserve(&details.owner, deposit); + 
Self::deposit_event(Event::CollectionMetadataCleared { collection }); + Ok(()) + }) + } +} diff --git a/frame/nfts/src/features/mod.rs b/frame/nfts/src/features/mod.rs index f814d696d774b..b77ee9bf2491b 100644 --- a/frame/nfts/src/features/mod.rs +++ b/frame/nfts/src/features/mod.rs @@ -15,8 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. +pub mod approvals; pub mod atomic_swap; +pub mod attributes; pub mod buy_sell; +pub mod create_delete_collection; +pub mod create_delete_item; pub mod lock; +pub mod metadata; pub mod roles; pub mod settings; +pub mod transfer; diff --git a/frame/nfts/src/features/roles.rs b/frame/nfts/src/features/roles.rs index e961779725b6e..d6be9965a5e74 100644 --- a/frame/nfts/src/features/roles.rs +++ b/frame/nfts/src/features/roles.rs @@ -20,6 +20,36 @@ use frame_support::pallet_prelude::*; use sp_std::collections::btree_map::BTreeMap; impl, I: 'static> Pallet { + pub(crate) fn do_set_team( + maybe_check_owner: Option, + collection: T::CollectionId, + issuer: T::AccountId, + admin: T::AccountId, + freezer: T::AccountId, + ) -> DispatchResult { + Collection::::try_mutate(collection, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; + if let Some(check_origin) = maybe_check_owner { + ensure!(check_origin == details.owner, Error::::NoPermission); + } + + // delete previous values + Self::clear_roles(&collection)?; + + let account_to_role = Self::group_roles_by_account(vec![ + (issuer.clone(), CollectionRole::Issuer), + (admin.clone(), CollectionRole::Admin), + (freezer.clone(), CollectionRole::Freezer), + ]); + for (account, roles) in account_to_role { + CollectionRoleOf::::insert(&collection, &account, roles); + } + + Self::deposit_event(Event::TeamChanged { collection, issuer, admin, freezer }); + Ok(()) + }) + } + /// Clears all the roles in a specified collection. /// /// - `collection_id`: A collection to clear the roles in. 
diff --git a/frame/nfts/src/features/settings.rs b/frame/nfts/src/features/settings.rs index 2596d360d8dcd..5f408ed183c35 100644 --- a/frame/nfts/src/features/settings.rs +++ b/frame/nfts/src/features/settings.rs @@ -18,16 +18,72 @@ use crate::*; use frame_support::pallet_prelude::*; -/// The helper methods bellow allow to read and validate different -/// collection/item/pallet settings. -/// For example, those settings allow to disable NFTs trading on a pallet level, or for a particular -/// collection, or for a specific item. impl, I: 'static> Pallet { + pub(crate) fn do_force_collection_config( + collection: T::CollectionId, + config: CollectionConfigFor, + ) -> DispatchResult { + ensure!(Collection::::contains_key(&collection), Error::::UnknownCollection); + CollectionConfigOf::::insert(&collection, config); + Self::deposit_event(Event::CollectionConfigChanged { collection }); + Ok(()) + } + + pub(crate) fn do_set_collection_max_supply( + maybe_check_owner: Option, + collection: T::CollectionId, + max_supply: u32, + ) -> DispatchResult { + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config.is_setting_enabled(CollectionSetting::UnlockedMaxSupply), + Error::::MaxSupplyLocked + ); + + let details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &details.owner, Error::::NoPermission); + } + + ensure!(details.items <= max_supply, Error::::MaxSupplyTooSmall); + + CollectionConfigOf::::try_mutate(collection, |maybe_config| { + let config = maybe_config.as_mut().ok_or(Error::::NoConfig)?; + config.max_supply = Some(max_supply); + Self::deposit_event(Event::CollectionMaxSupplySet { collection, max_supply }); + Ok(()) + }) + } + + pub(crate) fn do_update_mint_settings( + maybe_check_owner: Option, + collection: T::CollectionId, + mint_settings: MintSettings< + BalanceOf, + ::BlockNumber, + T::CollectionId, + >, + ) -> 
DispatchResult { + let details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &details.owner, Error::::NoPermission); + } + + CollectionConfigOf::::try_mutate(collection, |maybe_config| { + let config = maybe_config.as_mut().ok_or(Error::::NoConfig)?; + config.mint_settings = mint_settings; + Self::deposit_event(Event::CollectionMintSettingsUpdated { collection }); + Ok(()) + }) + } + pub(crate) fn get_collection_config( collection_id: &T::CollectionId, - ) -> Result { - let config = CollectionConfigOf::::get(&collection_id) - .ok_or(Error::::UnknownCollection)?; + ) -> Result, DispatchError> { + let config = + CollectionConfigOf::::get(&collection_id).ok_or(Error::::NoConfig)?; Ok(config) } diff --git a/frame/nfts/src/features/transfer.rs b/frame/nfts/src/features/transfer.rs new file mode 100644 index 0000000000000..7ebad853902a9 --- /dev/null +++ b/frame/nfts/src/features/transfer.rs @@ -0,0 +1,165 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub fn do_transfer( + collection: T::CollectionId, + item: T::ItemId, + dest: T::AccountId, + with_details: impl FnOnce( + &CollectionDetailsFor, + &mut ItemDetailsFor, + ) -> DispatchResult, + ) -> DispatchResult { + let collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + ensure!(!T::Locker::is_locked(collection, item), Error::::ItemLocked); + + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config.is_setting_enabled(CollectionSetting::TransferableItems), + Error::::ItemsNonTransferable + ); + + let item_config = Self::get_item_config(&collection, &item)?; + ensure!( + item_config.is_setting_enabled(ItemSetting::Transferable), + Error::::ItemLocked + ); + + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + with_details(&collection_details, &mut details)?; + + if details.deposit.account == details.owner { + // Move the deposit to the new owner. + T::Currency::repatriate_reserved( + &details.owner, + &dest, + details.deposit.amount, + Reserved, + )?; + } + + Account::::remove((&details.owner, &collection, &item)); + Account::::insert((&dest, &collection, &item), ()); + let origin = details.owner; + details.owner = dest; + + // The approved accounts have to be reset to None, because otherwise pre-approve attack + // would be possible, where the owner can approve his second account before making the + // transaction and then claiming the item back. 
+ details.approvals.clear(); + + Item::::insert(&collection, &item, &details); + ItemPriceOf::::remove(&collection, &item); + PendingSwapOf::::remove(&collection, &item); + + Self::deposit_event(Event::Transferred { + collection, + item, + from: origin, + to: details.owner, + }); + Ok(()) + } + + pub(crate) fn do_transfer_ownership( + origin: T::AccountId, + collection: T::CollectionId, + owner: T::AccountId, + ) -> DispatchResult { + let acceptable_collection = OwnershipAcceptance::::get(&owner); + ensure!(acceptable_collection.as_ref() == Some(&collection), Error::::Unaccepted); + + Collection::::try_mutate(collection, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; + ensure!(origin == details.owner, Error::::NoPermission); + if details.owner == owner { + return Ok(()) + } + + // Move the deposit to the new owner. + T::Currency::repatriate_reserved( + &details.owner, + &owner, + details.total_deposit, + Reserved, + )?; + CollectionAccount::::remove(&details.owner, &collection); + CollectionAccount::::insert(&owner, &collection, ()); + details.owner = owner.clone(); + OwnershipAcceptance::::remove(&owner); + + Self::deposit_event(Event::OwnerChanged { collection, new_owner: owner }); + Ok(()) + }) + } + + pub(crate) fn do_set_accept_ownership( + who: T::AccountId, + maybe_collection: Option, + ) -> DispatchResult { + let old = OwnershipAcceptance::::get(&who); + match (old.is_some(), maybe_collection.is_some()) { + (false, true) => { + frame_system::Pallet::::inc_consumers(&who)?; + }, + (true, false) => { + frame_system::Pallet::::dec_consumers(&who); + }, + _ => {}, + } + if let Some(collection) = maybe_collection.as_ref() { + OwnershipAcceptance::::insert(&who, collection); + } else { + OwnershipAcceptance::::remove(&who); + } + Self::deposit_event(Event::OwnershipAcceptanceChanged { who, maybe_collection }); + Ok(()) + } + + pub(crate) fn do_force_collection_owner( + collection: T::CollectionId, + owner: 
T::AccountId, + ) -> DispatchResult { + Collection::::try_mutate(collection, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; + if details.owner == owner { + return Ok(()) + } + + // Move the deposit to the new owner. + T::Currency::repatriate_reserved( + &details.owner, + &owner, + details.total_deposit, + Reserved, + )?; + + CollectionAccount::::remove(&details.owner, &collection); + CollectionAccount::::insert(&owner, &collection, ()); + details.owner = owner.clone(); + + Self::deposit_event(Event::OwnerChanged { collection, new_owner: owner }); + Ok(()) + }) + } +} diff --git a/frame/nfts/src/functions.rs b/frame/nfts/src/functions.rs deleted file mode 100644 index 90a701bc9eaa0..0000000000000 --- a/frame/nfts/src/functions.rs +++ /dev/null @@ -1,355 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Various pieces of common functionality. 
- -use super::*; -use frame_support::{ - ensure, - traits::{ExistenceRequirement, Get}, -}; -use sp_runtime::{DispatchError, DispatchResult}; - -impl, I: 'static> Pallet { - pub fn do_transfer( - collection: T::CollectionId, - item: T::ItemId, - dest: T::AccountId, - with_details: impl FnOnce( - &CollectionDetailsFor, - &mut ItemDetailsFor, - ) -> DispatchResult, - ) -> DispatchResult { - let collection_details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - ensure!(!T::Locker::is_locked(collection, item), Error::::ItemLocked); - - let collection_config = Self::get_collection_config(&collection)?; - ensure!( - collection_config.is_setting_enabled(CollectionSetting::TransferableItems), - Error::::ItemsNonTransferable - ); - - let item_config = Self::get_item_config(&collection, &item)?; - ensure!( - item_config.is_setting_enabled(ItemSetting::Transferable), - Error::::ItemLocked - ); - - let mut details = - Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; - with_details(&collection_details, &mut details)?; - - Account::::remove((&details.owner, &collection, &item)); - Account::::insert((&dest, &collection, &item), ()); - let origin = details.owner; - details.owner = dest; - - // The approved accounts have to be reset to None, because otherwise pre-approve attack - // would be possible, where the owner can approve his second account before making the - // transaction and then claiming the item back. 
- details.approvals.clear(); - - Item::::insert(&collection, &item, &details); - ItemPriceOf::::remove(&collection, &item); - PendingSwapOf::::remove(&collection, &item); - - Self::deposit_event(Event::Transferred { - collection, - item, - from: origin, - to: details.owner, - }); - Ok(()) - } - - pub fn do_create_collection( - collection: T::CollectionId, - owner: T::AccountId, - admin: T::AccountId, - config: CollectionConfig, - deposit: DepositBalanceOf, - event: Event, - ) -> DispatchResult { - ensure!(!Collection::::contains_key(collection), Error::::CollectionIdInUse); - - T::Currency::reserve(&owner, deposit)?; - - Collection::::insert( - collection, - CollectionDetails { - owner: owner.clone(), - total_deposit: deposit, - items: 0, - item_metadatas: 0, - attributes: 0, - }, - ); - CollectionRoleOf::::insert( - collection, - admin, - CollectionRoles( - CollectionRole::Admin | CollectionRole::Freezer | CollectionRole::Issuer, - ), - ); - - let next_id = collection.increment(); - - CollectionConfigOf::::insert(&collection, config); - CollectionAccount::::insert(&owner, &collection, ()); - NextCollectionId::::set(Some(next_id)); - - Self::deposit_event(Event::NextCollectionIdIncremented { next_id }); - Self::deposit_event(event); - Ok(()) - } - - pub fn do_destroy_collection( - collection: T::CollectionId, - witness: DestroyWitness, - maybe_check_owner: Option, - ) -> Result { - Collection::::try_mutate_exists(collection, |maybe_details| { - let collection_details = - maybe_details.take().ok_or(Error::::UnknownCollection)?; - if let Some(check_owner) = maybe_check_owner { - ensure!(collection_details.owner == check_owner, Error::::NoPermission); - } - ensure!(collection_details.items == witness.items, Error::::BadWitness); - ensure!( - collection_details.item_metadatas == witness.item_metadatas, - Error::::BadWitness - ); - ensure!(collection_details.attributes == witness.attributes, Error::::BadWitness); - - for (item, details) in 
Item::::drain_prefix(&collection) { - Account::::remove((&details.owner, &collection, &item)); - } - #[allow(deprecated)] - ItemMetadataOf::::remove_prefix(&collection, None); - #[allow(deprecated)] - ItemPriceOf::::remove_prefix(&collection, None); - #[allow(deprecated)] - PendingSwapOf::::remove_prefix(&collection, None); - CollectionMetadataOf::::remove(&collection); - Self::clear_roles(&collection)?; - #[allow(deprecated)] - Attribute::::remove_prefix((&collection,), None); - CollectionAccount::::remove(&collection_details.owner, &collection); - T::Currency::unreserve(&collection_details.owner, collection_details.total_deposit); - CollectionMaxSupply::::remove(&collection); - CollectionConfigOf::::remove(&collection); - let _ = ItemConfigOf::::clear_prefix(&collection, witness.items, None); - - Self::deposit_event(Event::Destroyed { collection }); - - Ok(DestroyWitness { - items: collection_details.items, - item_metadatas: collection_details.item_metadatas, - attributes: collection_details.attributes, - }) - }) - } - - pub fn do_mint( - collection: T::CollectionId, - item: T::ItemId, - owner: T::AccountId, - config: ItemConfig, - ) -> DispatchResult { - ensure!(!Item::::contains_key(collection, item), Error::::AlreadyExists); - - Collection::::try_mutate( - &collection, - |maybe_collection_details| -> DispatchResult { - let collection_details = - maybe_collection_details.as_mut().ok_or(Error::::UnknownCollection)?; - - if let Ok(max_supply) = CollectionMaxSupply::::try_get(&collection) { - ensure!(collection_details.items < max_supply, Error::::MaxSupplyReached); - } - - let items = - collection_details.items.checked_add(1).ok_or(ArithmeticError::Overflow)?; - collection_details.items = items; - - let collection_config = Self::get_collection_config(&collection)?; - let deposit = match collection_config - .is_setting_enabled(CollectionSetting::DepositRequired) - { - true => T::ItemDeposit::get(), - false => Zero::zero(), - }; - 
T::Currency::reserve(&collection_details.owner, deposit)?; - collection_details.total_deposit += deposit; - - let owner = owner.clone(); - Account::::insert((&owner, &collection, &item), ()); - - if let Ok(existing_config) = ItemConfigOf::::try_get(&collection, &item) { - ensure!(existing_config == config, Error::::InconsistentItemConfig); - } else { - ItemConfigOf::::insert(&collection, &item, config); - } - - let details = - ItemDetails { owner, approvals: ApprovalsOf::::default(), deposit }; - Item::::insert(&collection, &item, details); - Ok(()) - }, - )?; - - Self::deposit_event(Event::Issued { collection, item, owner }); - Ok(()) - } - - pub fn do_burn( - collection: T::CollectionId, - item: T::ItemId, - with_details: impl FnOnce(&ItemDetailsFor) -> DispatchResult, - ) -> DispatchResult { - let owner = Collection::::try_mutate( - &collection, - |maybe_collection_details| -> Result { - let collection_details = - maybe_collection_details.as_mut().ok_or(Error::::UnknownCollection)?; - let details = Item::::get(&collection, &item) - .ok_or(Error::::UnknownCollection)?; - with_details(&details)?; - - // Return the deposit. - T::Currency::unreserve(&collection_details.owner, details.deposit); - collection_details.total_deposit.saturating_reduce(details.deposit); - collection_details.items.saturating_dec(); - Ok(details.owner) - }, - )?; - - Item::::remove(&collection, &item); - Account::::remove((&owner, &collection, &item)); - ItemPriceOf::::remove(&collection, &item); - PendingSwapOf::::remove(&collection, &item); - - // NOTE: if item's settings are not empty (e.g. 
item's metadata is locked) - // then we keep the record and don't remove it - let config = Self::get_item_config(&collection, &item)?; - if !config.has_disabled_settings() { - ItemConfigOf::::remove(&collection, &item); - } - - Self::deposit_event(Event::Burned { collection, item, owner }); - Ok(()) - } - - pub fn do_set_price( - collection: T::CollectionId, - item: T::ItemId, - sender: T::AccountId, - price: Option>, - whitelisted_buyer: Option, - ) -> DispatchResult { - ensure!( - Self::is_pallet_feature_enabled(PalletFeature::Trading), - Error::::MethodDisabled - ); - - let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; - ensure!(details.owner == sender, Error::::NoPermission); - - let collection_config = Self::get_collection_config(&collection)?; - ensure!( - collection_config.is_setting_enabled(CollectionSetting::TransferableItems), - Error::::ItemsNonTransferable - ); - - let item_config = Self::get_item_config(&collection, &item)?; - ensure!( - item_config.is_setting_enabled(ItemSetting::Transferable), - Error::::ItemLocked - ); - - if let Some(ref price) = price { - ItemPriceOf::::insert(&collection, &item, (price, whitelisted_buyer.clone())); - Self::deposit_event(Event::ItemPriceSet { - collection, - item, - price: *price, - whitelisted_buyer, - }); - } else { - ItemPriceOf::::remove(&collection, &item); - Self::deposit_event(Event::ItemPriceRemoved { collection, item }); - } - - Ok(()) - } - - pub fn do_buy_item( - collection: T::CollectionId, - item: T::ItemId, - buyer: T::AccountId, - bid_price: ItemPrice, - ) -> DispatchResult { - ensure!( - Self::is_pallet_feature_enabled(PalletFeature::Trading), - Error::::MethodDisabled - ); - - let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; - ensure!(details.owner != buyer, Error::::NoPermission); - - let price_info = - ItemPriceOf::::get(&collection, &item).ok_or(Error::::NotForSale)?; - - ensure!(bid_price >= price_info.0, Error::::BidTooLow); - - if let 
Some(only_buyer) = price_info.1 { - ensure!(only_buyer == buyer, Error::::NoPermission); - } - - T::Currency::transfer( - &buyer, - &details.owner, - price_info.0, - ExistenceRequirement::KeepAlive, - )?; - - let old_owner = details.owner.clone(); - - Self::do_transfer(collection, item, buyer.clone(), |_, _| Ok(()))?; - - Self::deposit_event(Event::ItemBought { - collection, - item, - price: price_info.0, - seller: old_owner, - buyer, - }); - - Ok(()) - } - - #[cfg(any(test, feature = "runtime-benchmarks"))] - pub fn set_next_id(id: T::CollectionId) { - NextCollectionId::::set(Some(id)); - } - - #[cfg(test)] - pub fn get_next_id() -> T::CollectionId { - NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()) - } -} diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index 210fe4831991d..b42147e6687d9 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -92,7 +92,7 @@ impl, I: 'static> Inspect<::AccountId> for Palle } } -impl, I: 'static> Create<::AccountId, CollectionConfig> +impl, I: 'static> Create<::AccountId, CollectionConfigFor> for Pallet { /// Create a `collection` of nonfungible items to be owned by `who` and managed by `admin`. 
@@ -100,7 +100,7 @@ impl, I: 'static> Create<::AccountId, Collection collection: &Self::CollectionId, who: &T::AccountId, admin: &T::AccountId, - config: &CollectionConfig, + config: &CollectionConfigFor, ) -> DispatchResult { // DepositRequired can be disabled by calling the force_create() only ensure!( @@ -134,16 +134,22 @@ impl, I: 'static> Destroy<::AccountId> for Palle } } -impl, I: 'static> Mutate<::AccountId, ItemSettings> - for Pallet -{ +impl, I: 'static> Mutate<::AccountId, ItemConfig> for Pallet { fn mint_into( collection: &Self::CollectionId, item: &Self::ItemId, who: &T::AccountId, - settings: &ItemSettings, + item_config: &ItemConfig, + deposit_collection_owner: bool, ) -> DispatchResult { - Self::do_mint(*collection, *item, who.clone(), ItemConfig(*settings)) + Self::do_mint( + *collection, + *item, + who.clone(), + *item_config, + deposit_collection_owner, + |_, _| Ok(()), + ) } fn burn( diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 00fb6dc238f86..0f3d3c89c2932 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -35,8 +35,8 @@ pub mod mock; #[cfg(test)] mod tests; +mod common_functions; mod features; -mod functions; mod impl_nonfungibles; mod types; @@ -66,7 +66,7 @@ type AccountIdLookupOf = <::Lookup as StaticLookup>::Sourc #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; + use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; use frame_system::pallet_prelude::*; #[pallet::pallet] @@ -238,7 +238,7 @@ pub mod pallet { T::CollectionId, Blake2_128Concat, T::ItemId, - ItemDetails, ApprovalsOf>, + ItemDetails, ApprovalsOf>, OptionQuery, >; @@ -289,11 +289,6 @@ pub mod pallet { OptionQuery, >; - /// Keeps track of the number of items a collection might have. 
- #[pallet::storage] - pub(super) type CollectionMaxSupply, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, T::CollectionId, u32, OptionQuery>; - /// Stores the `CollectionId` that is going to be used for the next collection. /// This gets incremented by 1 whenever a new collection is created. #[pallet::storage] @@ -320,7 +315,7 @@ pub mod pallet { /// Config of a collection. #[pallet::storage] pub(super) type CollectionConfigOf, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, T::CollectionId, CollectionConfig, OptionQuery>; + StorageMap<_, Blake2_128Concat, T::CollectionId, CollectionConfigFor, OptionQuery>; /// Config of an item. #[pallet::storage] @@ -395,8 +390,8 @@ pub mod pallet { }, /// All approvals of an item got cancelled. AllApprovalsCancelled { collection: T::CollectionId, item: T::ItemId, owner: T::AccountId }, - /// A `collection` has had its attributes changed by the `Force` origin. - CollectionStatusChanged { collection: T::CollectionId }, + /// A `collection` has had its config changed by the `Force` origin. + CollectionConfigChanged { collection: T::CollectionId }, /// New metadata has been set for a `collection`. CollectionMetadataSet { collection: T::CollectionId, data: BoundedVec }, /// Metadata has been cleared for a `collection`. @@ -428,10 +423,10 @@ pub mod pallet { OwnershipAcceptanceChanged { who: T::AccountId, maybe_collection: Option }, /// Max supply has been set for a collection. CollectionMaxSupplySet { collection: T::CollectionId, max_supply: u32 }, + /// Mint settings for a collection had changed. + CollectionMintSettingsUpdated { collection: T::CollectionId }, /// Event gets emmited when the `NextCollectionId` gets incremented. NextCollectionIdIncremented { next_id: T::CollectionId }, - /// The config of a collection has change. - CollectionConfigChanged { id: T::CollectionId }, /// The price was set for the instance. 
ItemPriceSet { collection: T::CollectionId, @@ -526,8 +521,8 @@ pub mod pallet { LockedCollectionMetadata, /// All items have been minted. MaxSupplyReached, - /// The max supply has already been set. - MaxSupplyAlreadySet, + /// The max supply is locked and can't be changed. + MaxSupplyLocked, /// The provided max supply is less to the amount of items a collection already has. MaxSupplyTooSmall, /// The given item ID is unknown. @@ -554,18 +549,10 @@ pub mod pallet { NoConfig, /// Some roles were not cleared. RolesNotCleared, - } - - impl, I: 'static> Pallet { - /// Get the owner of the item, if the item exists. - pub fn owner(collection: T::CollectionId, item: T::ItemId) -> Option { - Item::::get(collection, item).map(|i| i.owner) - } - - /// Get the owner of the item, if the item exists. - pub fn collection_owner(collection: T::CollectionId) -> Option { - Collection::::get(collection).map(|i| i.owner) - } + /// Mint has not started yet. + MintNotStated, + /// Mint has already ended. + MintEnded, } #[pallet::call] @@ -589,7 +576,7 @@ pub mod pallet { pub fn create( origin: OriginFor, admin: AccountIdLookupOf, - config: CollectionConfig, + config: CollectionConfigFor, ) -> DispatchResult { let collection = NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()); @@ -633,7 +620,7 @@ pub mod pallet { pub fn force_create( origin: OriginFor, owner: AccountIdLookupOf, - config: CollectionConfig, + config: CollectionConfigFor, ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; let owner = T::Lookup::lookup(owner)?; @@ -676,10 +663,9 @@ pub mod pallet { collection: T::CollectionId, witness: DestroyWitness, ) -> DispatchResultWithPostInfo { - let maybe_check_owner = match T::ForceOrigin::try_origin(origin) { - Ok(_) => None, - Err(origin) => Some(ensure_signed(origin)?), - }; + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; let details = 
Self::do_destroy_collection(collection, witness, maybe_check_owner)?; Ok(Some(T::WeightInfo::destroy( @@ -695,28 +681,112 @@ pub mod pallet { /// The origin must be Signed and the sender must be the Issuer of the `collection`. /// /// - `collection`: The collection of the item to be minted. - /// - `item`: The item value of the item to be minted. - /// - `beneficiary`: The initial owner of the minted item. + /// - `item`: An identifier of the new item. + /// - `witness_data`: When the mint type is `HolderOf(collection_id)`, then the owned + /// item_id from that collection needs to be provided within the witness data object. /// /// Emits `Issued` event when successful. /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::mint())] pub fn mint( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + witness_data: Option>, + ) -> DispatchResult { + let caller = ensure_signed(origin)?; + + let collection_config = Self::get_collection_config(&collection)?; + let item_settings = collection_config.mint_settings.default_item_settings; + let item_config = ItemConfig { settings: item_settings }; + + Self::do_mint( + collection, + item, + caller.clone(), + item_config, + false, + |collection_details, collection_config| { + let mint_settings = collection_config.mint_settings; + let now = frame_system::Pallet::::block_number(); + + if let Some(start_block) = mint_settings.start_block { + ensure!(start_block <= now, Error::::MintNotStated); + } + if let Some(end_block) = mint_settings.end_block { + ensure!(end_block >= now, Error::::MintEnded); + } + + match mint_settings.mint_type { + MintType::Issuer => { + ensure!( + Self::has_role(&collection, &caller, CollectionRole::Issuer), + Error::::NoPermission + ) + }, + MintType::HolderOf(collection_id) => { + let correct_witness = match witness_data { + Some(MintWitness { owner_of_item }) => + Account::::contains_key(( + &caller, + &collection_id, + &owner_of_item, + )), + None => false, + }; + 
ensure!(correct_witness, Error::::BadWitness) + }, + _ => {}, + } + + if let Some(price) = mint_settings.price { + T::Currency::transfer( + &caller, + &collection_details.owner, + price, + ExistenceRequirement::KeepAlive, + )?; + } + + Ok(()) + }, + ) + } + + /// Mint an item of a particular collection from a privileged origin. + /// + /// The origin must conform to `ForceOrigin` or must be `Signed` and the sender must be the + /// Issuer of the `collection`. + /// + /// - `collection`: The collection of the item to be minted. + /// - `item`: An identifier of the new item. + /// - `owner`: An owner of the minted item. + /// - `item_config`: A config of the new item. + /// + /// Emits `Issued` event when successful. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_mint())] + pub fn force_mint( origin: OriginFor, collection: T::CollectionId, item: T::ItemId, owner: AccountIdLookupOf, - config: ItemConfig, + item_config: ItemConfig, ) -> DispatchResult { - let origin = ensure_signed(origin)?; + let maybe_check_origin = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; let owner = T::Lookup::lookup(owner)?; - ensure!( - Self::has_role(&collection, &origin, CollectionRole::Issuer), - Error::::NoPermission - ); - Self::do_mint(collection, item, owner, config) + if let Some(check_origin) = maybe_check_origin { + ensure!( + Self::has_role(&collection, &check_origin, CollectionRole::Issuer), + Error::::NoPermission + ); + } + Self::do_mint(collection, item, owner, item_config, true, |_, _| Ok(())) } /// Destroy a single item. 
@@ -818,7 +888,7 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_signed(origin)?; - let mut collection_details = + let collection_details = Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; ensure!(collection_details.owner == origin, Error::::NoPermission); @@ -834,11 +904,11 @@ pub mod pallet { Some(x) => x, None => continue, }; - let old = details.deposit; + let old = details.deposit.amount; if old > deposit { - T::Currency::unreserve(&collection_details.owner, old - deposit); + T::Currency::unreserve(&details.deposit.account, old - deposit); } else if deposit > old { - if T::Currency::reserve(&collection_details.owner, deposit - old).is_err() { + if T::Currency::reserve(&details.deposit.account, deposit - old).is_err() { // NOTE: No alterations made to collection_details in this iteration so far, // so this is OK to do. continue @@ -846,13 +916,10 @@ pub mod pallet { } else { continue } - collection_details.total_deposit.saturating_accrue(deposit); - collection_details.total_deposit.saturating_reduce(old); - details.deposit = deposit; + details.deposit.amount = deposit; Item::::insert(&collection, &item, &details); successful.push(item); } - Collection::::insert(&collection, &collection_details); Self::deposit_event(Event::::Redeposited { collection, @@ -907,7 +974,7 @@ pub mod pallet { /// Origin must be Signed and the sender should be the Freezer of the `collection`. /// /// - `collection`: The collection to be locked. - /// - `lock_config`: The config with the settings to be locked. + /// - `lock_settings`: The settings to be locked. /// /// Note: it's possible to only lock(set) the setting, but not to unset it. /// Emits `CollectionLocked`. 
@@ -917,10 +984,10 @@ pub mod pallet { pub fn lock_collection( origin: OriginFor, collection: T::CollectionId, - lock_config: CollectionConfig, + lock_settings: CollectionSettings, ) -> DispatchResult { let origin = ensure_signed(origin)?; - Self::do_lock_collection(origin, collection, lock_config) + Self::do_lock_collection(origin, collection, lock_settings) } /// Change the Owner of a collection. @@ -942,37 +1009,13 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; - - let acceptable_collection = OwnershipAcceptance::::get(&owner); - ensure!(acceptable_collection.as_ref() == Some(&collection), Error::::Unaccepted); - - Collection::::try_mutate(collection, |maybe_details| { - let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; - ensure!(origin == details.owner, Error::::NoPermission); - if details.owner == owner { - return Ok(()) - } - - // Move the deposit to the new owner. - T::Currency::repatriate_reserved( - &details.owner, - &owner, - details.total_deposit, - Reserved, - )?; - CollectionAccount::::remove(&details.owner, &collection); - CollectionAccount::::insert(&owner, &collection, ()); - details.owner = owner.clone(); - OwnershipAcceptance::::remove(&owner); - - Self::deposit_event(Event::OwnerChanged { collection, new_owner: owner }); - Ok(()) - }) + Self::do_transfer_ownership(origin, collection, owner) } /// Change the Issuer, Admin and Freezer of a collection. /// - /// Origin must be Signed and the sender should be the Owner of the `collection`. + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// `collection`. /// /// - `collection`: The collection whose team should be changed. /// - `issuer`: The new Issuer of this collection. 
@@ -990,35 +1033,60 @@ pub mod pallet { admin: AccountIdLookupOf, freezer: AccountIdLookupOf, ) -> DispatchResult { - let origin = ensure_signed(origin)?; + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; let issuer = T::Lookup::lookup(issuer)?; let admin = T::Lookup::lookup(admin)?; let freezer = T::Lookup::lookup(freezer)?; + Self::do_set_team(maybe_check_owner, collection, issuer, admin, freezer) + } - Collection::::try_mutate(collection, |maybe_details| { - let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; - ensure!(origin == details.owner, Error::::NoPermission); - - // delete previous values - Self::clear_roles(&collection)?; - - let account_to_role = Self::group_roles_by_account(vec![ - (issuer.clone(), CollectionRole::Issuer), - (admin.clone(), CollectionRole::Admin), - (freezer.clone(), CollectionRole::Freezer), - ]); - for (account, roles) in account_to_role { - CollectionRoleOf::::insert(&collection, &account, roles); - } + /// Change the Owner of a collection. + /// + /// Origin must be `ForceOrigin`. + /// + /// - `collection`: The identifier of the collection. + /// - `owner`: The new Owner of this collection. + /// + /// Emits `OwnerChanged`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_collection_owner())] + pub fn force_collection_owner( + origin: OriginFor, + collection: T::CollectionId, + owner: AccountIdLookupOf, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + let new_owner = T::Lookup::lookup(owner)?; + Self::do_force_collection_owner(collection, new_owner) + } - Self::deposit_event(Event::TeamChanged { collection, issuer, admin, freezer }); - Ok(()) - }) + /// Change the config of a collection. + /// + /// Origin must be `ForceOrigin`. + /// + /// - `collection`: The identifier of the collection. + /// - `config`: The new config of this collection. 
+ /// + /// Emits `CollectionConfigChanged`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_collection_config())] + pub fn force_collection_config( + origin: OriginFor, + collection: T::CollectionId, + config: CollectionConfigFor, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + Self::do_force_collection_config(collection, config) } /// Approve an item to be transferred by a delegated third-party account. /// - /// Origin must be Signed and must be the owner of the `item`. + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// `item`. /// /// - `collection`: The collection of the item to be approved for delegated transfer. /// - `item`: The item to be approved for delegated transfer. @@ -1037,55 +1105,17 @@ pub mod pallet { delegate: AccountIdLookupOf, maybe_deadline: Option<::BlockNumber>, ) -> DispatchResult { - ensure!( - Self::is_pallet_feature_enabled(PalletFeature::Approvals), - Error::::MethodDisabled - ); - let maybe_check: Option = T::ForceOrigin::try_origin(origin) + let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; - let delegate = T::Lookup::lookup(delegate)?; - - let mut details = - Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; - - let collection_config = Self::get_collection_config(&collection)?; - ensure!( - collection_config.is_setting_enabled(CollectionSetting::TransferableItems), - Error::::ItemsNonTransferable - ); - - let collection_config = Self::get_collection_config(&collection)?; - ensure!( - collection_config.is_setting_enabled(CollectionSetting::TransferableItems), - Error::::ItemsNonTransferable - ); - - if let Some(check) = maybe_check { - let is_admin = Self::has_role(&collection, &check, CollectionRole::Admin); - let permitted = is_admin || check == details.owner; - ensure!(permitted, Error::::NoPermission); - } - - let now = 
frame_system::Pallet::::block_number(); - let deadline = maybe_deadline.map(|d| d.saturating_add(now)); - - details - .approvals - .try_insert(delegate.clone(), deadline) - .map_err(|_| Error::::ReachedApprovalLimit)?; - Item::::insert(&collection, &item, &details); - - Self::deposit_event(Event::ApprovedTransfer { + Self::do_approve_transfer( + maybe_check_origin, collection, item, - owner: details.owner, delegate, - deadline, - }); - - Ok(()) + maybe_deadline, + ) } /// Cancel one of the transfer approvals for a specific item. @@ -1110,43 +1140,11 @@ pub mod pallet { item: T::ItemId, delegate: AccountIdLookupOf, ) -> DispatchResult { - let maybe_check: Option = T::ForceOrigin::try_origin(origin) + let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; - let delegate = T::Lookup::lookup(delegate)?; - - let mut details = - Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; - - let maybe_deadline = - details.approvals.get(&delegate).ok_or(Error::::NotDelegate)?; - - let is_past_deadline = if let Some(deadline) = maybe_deadline { - let now = frame_system::Pallet::::block_number(); - now > *deadline - } else { - false - }; - - if !is_past_deadline { - if let Some(check) = maybe_check { - let is_admin = Self::has_role(&collection, &check, CollectionRole::Admin); - let permitted = is_admin || check == details.owner; - ensure!(permitted, Error::::NoPermission); - } - } - - details.approvals.remove(&delegate); - Item::::insert(&collection, &item, &details); - Self::deposit_event(Event::ApprovalCancelled { - collection, - item, - owner: details.owner, - delegate, - }); - - Ok(()) + Self::do_cancel_approval(maybe_check_origin, collection, item, delegate) } /// Cancel all the approvals of a specific item. 
@@ -1169,86 +1167,10 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, ) -> DispatchResult { - let maybe_check: Option = T::ForceOrigin::try_origin(origin) + let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; - - let mut details = - Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; - - if let Some(check) = maybe_check { - let is_admin = Self::has_role(&collection, &check, CollectionRole::Admin); - let permitted = is_admin || check == details.owner; - ensure!(permitted, Error::::NoPermission); - } - - details.approvals.clear(); - Item::::insert(&collection, &item, &details); - Self::deposit_event(Event::AllApprovalsCancelled { - collection, - item, - owner: details.owner, - }); - - Ok(()) - } - - /// Alter the attributes of a given collection. - /// - /// Origin must be `ForceOrigin`. - /// - /// - `collection`: The identifier of the collection. - /// - `owner`: The new Owner of this collection. - /// - `issuer`: The new Issuer of this collection. - /// - `admin`: The new Admin of this collection. - /// - `freezer`: The new Freezer of this collection. - /// - `config`: Collection's config. - /// - /// Emits `CollectionStatusChanged` with the identity of the item. 
- /// - /// Weight: `O(1)` - #[pallet::weight(T::WeightInfo::force_collection_status())] - pub fn force_collection_status( - origin: OriginFor, - collection: T::CollectionId, - owner: AccountIdLookupOf, - issuer: AccountIdLookupOf, - admin: AccountIdLookupOf, - freezer: AccountIdLookupOf, - config: CollectionConfig, - ) -> DispatchResult { - T::ForceOrigin::ensure_origin(origin)?; - - Collection::::try_mutate(collection, |maybe_collection| { - let mut collection_info = - maybe_collection.take().ok_or(Error::::UnknownCollection)?; - let old_owner = collection_info.owner; - let new_owner = T::Lookup::lookup(owner)?; - collection_info.owner = new_owner.clone(); - *maybe_collection = Some(collection_info); - CollectionAccount::::remove(&old_owner, &collection); - CollectionAccount::::insert(&new_owner, &collection, ()); - CollectionConfigOf::::insert(&collection, config); - - let issuer = T::Lookup::lookup(issuer)?; - let admin = T::Lookup::lookup(admin)?; - let freezer = T::Lookup::lookup(freezer)?; - - // delete previous values - Self::clear_roles(&collection)?; - - let account_to_role = Self::group_roles_by_account(vec![ - (issuer, CollectionRole::Issuer), - (admin, CollectionRole::Admin), - (freezer, CollectionRole::Freezer), - ]); - for (account, roles) in account_to_role { - CollectionRoleOf::::insert(&collection, &account, roles); - } - - Self::deposit_event(Event::CollectionStatusChanged { collection }); - Ok(()) - }) + Self::do_clear_all_transfer_approvals(maybe_check_origin, collection, item) } /// Disallows changing the metadata of attributes of the item. 
@@ -1274,8 +1196,7 @@ pub mod pallet { ) -> DispatchResult { let maybe_check_owner = T::ForceOrigin::try_origin(origin) .map(|_| None) - .or_else(|origin| ensure_signed(origin).map(Some))?; - + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; Self::do_lock_item_properties( maybe_check_owner, collection, @@ -1310,61 +1231,10 @@ pub mod pallet { key: BoundedVec, value: BoundedVec, ) -> DispatchResult { - ensure!( - Self::is_pallet_feature_enabled(PalletFeature::Attributes), - Error::::MethodDisabled - ); let maybe_check_owner = T::ForceOrigin::try_origin(origin) .map(|_| None) - .or_else(|origin| ensure_signed(origin).map(Some))?; - - let mut collection_details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - - if let Some(check_owner) = &maybe_check_owner { - ensure!(check_owner == &collection_details.owner, Error::::NoPermission); - } - - let collection_config = Self::get_collection_config(&collection)?; - match maybe_item { - None => { - ensure!( - collection_config.is_setting_enabled(CollectionSetting::UnlockedAttributes), - Error::::LockedCollectionAttributes - ) - }, - Some(item) => { - let maybe_is_locked = Self::get_item_config(&collection, &item) - .map(|c| c.has_disabled_setting(ItemSetting::UnlockedAttributes))?; - ensure!(!maybe_is_locked, Error::::LockedItemAttributes); - }, - }; - - let attribute = Attribute::::get((collection, maybe_item, &key)); - if attribute.is_none() { - collection_details.attributes.saturating_inc(); - } - let old_deposit = attribute.map_or(Zero::zero(), |m| m.1); - collection_details.total_deposit.saturating_reduce(old_deposit); - let mut deposit = Zero::zero(); - if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) && - maybe_check_owner.is_some() - { - deposit = T::DepositPerByte::get() - .saturating_mul(((key.len() + value.len()) as u32).into()) - .saturating_add(T::AttributeDepositBase::get()); - } - 
collection_details.total_deposit.saturating_accrue(deposit); - if deposit > old_deposit { - T::Currency::reserve(&collection_details.owner, deposit - old_deposit)?; - } else if deposit < old_deposit { - T::Currency::unreserve(&collection_details.owner, old_deposit - deposit); - } - - Attribute::::insert((&collection, maybe_item, &key), (&value, deposit)); - Collection::::insert(collection, &collection_details); - Self::deposit_event(Event::AttributeSet { collection, maybe_item, key, value }); - Ok(()) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_set_attribute(maybe_check_owner, collection, maybe_item, key, value) } /// Clear an attribute for a collection or item. @@ -1390,44 +1260,8 @@ pub mod pallet { ) -> DispatchResult { let maybe_check_owner = T::ForceOrigin::try_origin(origin) .map(|_| None) - .or_else(|origin| ensure_signed(origin).map(Some))?; - - let mut collection_details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - if let Some(check_owner) = &maybe_check_owner { - ensure!(check_owner == &collection_details.owner, Error::::NoPermission); - } - - if maybe_check_owner.is_some() { - match maybe_item { - None => { - let collection_config = Self::get_collection_config(&collection)?; - ensure!( - collection_config - .is_setting_enabled(CollectionSetting::UnlockedAttributes), - Error::::LockedCollectionAttributes - ) - }, - Some(item) => { - // NOTE: if the item was previously burned, the ItemSettings record might - // not exists. In that case, we allow to clear the attribute. 
- let maybe_is_locked = Self::get_item_config(&collection, &item) - .map_or(false, |c| { - c.has_disabled_setting(ItemSetting::UnlockedAttributes) - }); - ensure!(!maybe_is_locked, Error::::LockedItemAttributes); - }, - }; - } - - if let Some((_, deposit)) = Attribute::::take((collection, maybe_item, &key)) { - collection_details.attributes.saturating_dec(); - collection_details.total_deposit.saturating_reduce(deposit); - T::Currency::unreserve(&collection_details.owner, deposit); - Collection::::insert(collection, &collection_details); - Self::deposit_event(Event::AttributeCleared { collection, maybe_item, key }); - } - Ok(()) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_clear_attribute(maybe_check_owner, collection, maybe_item, key) } /// Set the metadata for an item. @@ -1455,51 +1289,8 @@ pub mod pallet { ) -> DispatchResult { let maybe_check_owner = T::ForceOrigin::try_origin(origin) .map(|_| None) - .or_else(|origin| ensure_signed(origin).map(Some))?; - - let mut collection_details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - - let item_config = Self::get_item_config(&collection, &item)?; - ensure!( - maybe_check_owner.is_none() || - item_config.is_setting_enabled(ItemSetting::UnlockedMetadata), - Error::::LockedItemMetadata - ); - - if let Some(check_owner) = &maybe_check_owner { - ensure!(check_owner == &collection_details.owner, Error::::NoPermission); - } - - let collection_config = Self::get_collection_config(&collection)?; - - ItemMetadataOf::::try_mutate_exists(collection, item, |metadata| { - if metadata.is_none() { - collection_details.item_metadatas.saturating_inc(); - } - let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); - collection_details.total_deposit.saturating_reduce(old_deposit); - let mut deposit = Zero::zero(); - if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) && - maybe_check_owner.is_some() - { - deposit = 
T::DepositPerByte::get() - .saturating_mul(((data.len()) as u32).into()) - .saturating_add(T::MetadataDepositBase::get()); - } - if deposit > old_deposit { - T::Currency::reserve(&collection_details.owner, deposit - old_deposit)?; - } else if deposit < old_deposit { - T::Currency::unreserve(&collection_details.owner, old_deposit - deposit); - } - collection_details.total_deposit.saturating_accrue(deposit); - - *metadata = Some(ItemMetadata { deposit, data: data.clone() }); - - Collection::::insert(&collection, &collection_details); - Self::deposit_event(Event::MetadataSet { collection, item, data }); - Ok(()) - }) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_set_item_metadata(maybe_check_owner, collection, item, data) } /// Clear the metadata for an item. @@ -1523,32 +1314,8 @@ pub mod pallet { ) -> DispatchResult { let maybe_check_owner = T::ForceOrigin::try_origin(origin) .map(|_| None) - .or_else(|origin| ensure_signed(origin).map(Some))?; - - let mut collection_details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - if let Some(check_owner) = &maybe_check_owner { - ensure!(check_owner == &collection_details.owner, Error::::NoPermission); - } - - // NOTE: if the item was previously burned, the ItemSettings record might not exists - let is_locked = Self::get_item_config(&collection, &item) - .map_or(false, |c| c.has_disabled_setting(ItemSetting::UnlockedMetadata)); - - ensure!(maybe_check_owner.is_none() || !is_locked, Error::::LockedItemMetadata); - - ItemMetadataOf::::try_mutate_exists(collection, item, |metadata| { - if metadata.is_some() { - collection_details.item_metadatas.saturating_dec(); - } - let deposit = metadata.take().ok_or(Error::::UnknownItem)?.deposit; - T::Currency::unreserve(&collection_details.owner, deposit); - collection_details.total_deposit.saturating_reduce(deposit); - - Collection::::insert(&collection, &collection_details); - 
Self::deposit_event(Event::MetadataCleared { collection, item }); - Ok(()) - }) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_clear_item_metadata(maybe_check_owner, collection, item) } /// Set the metadata for a collection. @@ -1574,46 +1341,8 @@ pub mod pallet { ) -> DispatchResult { let maybe_check_owner = T::ForceOrigin::try_origin(origin) .map(|_| None) - .or_else(|origin| ensure_signed(origin).map(Some))?; - - let collection_config = Self::get_collection_config(&collection)?; - ensure!( - maybe_check_owner.is_none() || - collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), - Error::::LockedCollectionMetadata - ); - - let mut details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - if let Some(check_owner) = &maybe_check_owner { - ensure!(check_owner == &details.owner, Error::::NoPermission); - } - - CollectionMetadataOf::::try_mutate_exists(collection, |metadata| { - let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); - details.total_deposit.saturating_reduce(old_deposit); - let mut deposit = Zero::zero(); - if maybe_check_owner.is_some() && - collection_config.is_setting_enabled(CollectionSetting::DepositRequired) - { - deposit = T::DepositPerByte::get() - .saturating_mul(((data.len()) as u32).into()) - .saturating_add(T::MetadataDepositBase::get()); - } - if deposit > old_deposit { - T::Currency::reserve(&details.owner, deposit - old_deposit)?; - } else if deposit < old_deposit { - T::Currency::unreserve(&details.owner, old_deposit - deposit); - } - details.total_deposit.saturating_accrue(deposit); - - Collection::::insert(&collection, details); - - *metadata = Some(CollectionMetadata { deposit, data: data.clone() }); - - Self::deposit_event(Event::CollectionMetadataSet { collection, data }); - Ok(()) - }) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_set_collection_metadata(maybe_check_owner, 
collection, data) } /// Clear the metadata for a collection. @@ -1635,27 +1364,8 @@ pub mod pallet { ) -> DispatchResult { let maybe_check_owner = T::ForceOrigin::try_origin(origin) .map(|_| None) - .or_else(|origin| ensure_signed(origin).map(Some))?; - - let details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - if let Some(check_owner) = &maybe_check_owner { - ensure!(check_owner == &details.owner, Error::::NoPermission); - } - - let collection_config = Self::get_collection_config(&collection)?; - ensure!( - maybe_check_owner.is_none() || - collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), - Error::::LockedCollectionMetadata - ); - - CollectionMetadataOf::::try_mutate_exists(collection, |metadata| { - let deposit = metadata.take().ok_or(Error::::UnknownCollection)?.deposit; - T::Currency::unreserve(&details.owner, deposit); - Self::deposit_event(Event::CollectionMetadataCleared { collection }); - Ok(()) - }) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_clear_collection_metadata(maybe_check_owner, collection) } /// Set (or reset) the acceptance of ownership for a particular account. @@ -1674,23 +1384,7 @@ pub mod pallet { maybe_collection: Option, ) -> DispatchResult { let who = ensure_signed(origin)?; - let old = OwnershipAcceptance::::get(&who); - match (old.is_some(), maybe_collection.is_some()) { - (false, true) => { - frame_system::Pallet::::inc_consumers(&who)?; - }, - (true, false) => { - frame_system::Pallet::::dec_consumers(&who); - }, - _ => {}, - } - if let Some(collection) = maybe_collection.as_ref() { - OwnershipAcceptance::::insert(&who, collection); - } else { - OwnershipAcceptance::::remove(&who); - } - Self::deposit_event(Event::OwnershipAcceptanceChanged { who, maybe_collection }); - Ok(()) + Self::do_set_accept_ownership(who, maybe_collection) } /// Set the maximum amount of items a collection could have. 
@@ -1698,8 +1392,6 @@ pub mod pallet { /// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of /// the `collection`. /// - /// Note: This function can only succeed once per collection. - /// /// - `collection`: The identifier of the collection to change. /// - `max_supply`: The maximum amount of items a collection could have. /// @@ -1712,24 +1404,33 @@ pub mod pallet { ) -> DispatchResult { let maybe_check_owner = T::ForceOrigin::try_origin(origin) .map(|_| None) - .or_else(|origin| ensure_signed(origin).map(Some))?; - - ensure!( - !CollectionMaxSupply::::contains_key(&collection), - Error::::MaxSupplyAlreadySet - ); - - let details = - Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - if let Some(check_owner) = &maybe_check_owner { - ensure!(check_owner == &details.owner, Error::::NoPermission); - } - - ensure!(details.items <= max_supply, Error::::MaxSupplyTooSmall); + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_set_collection_max_supply(maybe_check_owner, collection, max_supply) + } - CollectionMaxSupply::::insert(&collection, max_supply); - Self::deposit_event(Event::CollectionMaxSupplySet { collection, max_supply }); - Ok(()) + /// Update mint settings. + /// + /// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of + /// the `collection`. + /// + /// - `collection`: The identifier of the collection to change. + /// - `mint_settings`: The new mint settings. + /// + /// Emits `CollectionMintSettingsUpdated` event when successful. 
+ #[pallet::weight(T::WeightInfo::update_mint_settings())] + pub fn update_mint_settings( + origin: OriginFor, + collection: T::CollectionId, + mint_settings: MintSettings< + BalanceOf, + ::BlockNumber, + T::CollectionId, + >, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_update_mint_settings(maybe_check_owner, collection, mint_settings) } /// Set (or reset) the price for an item. diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index d0841ebc1f238..b58c81b1d70f8 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -18,6 +18,7 @@ //! Tests for Nfts pallet. use crate::{mock::*, Event, *}; +use enumflags2::BitFlags; use frame_support::{ assert_noop, assert_ok, dispatch::Dispatchable, @@ -92,12 +93,34 @@ fn events() -> Vec> { result } -fn default_collection_config() -> CollectionConfig { - CollectionConfig::disable_settings(CollectionSetting::DepositRequired.into()) +fn collection_config_from_disabled_settings( + settings: BitFlags, +) -> CollectionConfigFor { + CollectionConfig { + settings: CollectionSettings::from_disabled(settings), + max_supply: None, + mint_settings: MintSettings::default(), + } +} + +fn collection_config_with_all_settings_enabled() -> CollectionConfigFor { + CollectionConfig { + settings: CollectionSettings::all_enabled(), + max_supply: None, + mint_settings: MintSettings::default(), + } +} + +fn default_collection_config() -> CollectionConfigFor { + collection_config_from_disabled_settings(CollectionSetting::DepositRequired.into()) } fn default_item_config() -> ItemConfig { - ItemConfig::all_settings_enabled() + ItemConfig { settings: ItemSettings::all_enabled() } +} + +fn item_config_from_disabled_settings(settings: BitFlags) -> ItemConfig { + ItemConfig { settings: ItemSettings::from_disabled(settings) } } #[test] @@ -112,12 +135,12 @@ fn 
basic_minting_should_work() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); assert_eq!(collections(), vec![(1, 0)]); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); assert_eq!(items(), vec![(1, 0, 42)]); assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 2, default_collection_config())); assert_eq!(collections(), vec![(1, 0), (2, 1)]); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 1, 69, 1, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(2), 1, 69, 1, default_item_config())); assert_eq!(items(), vec![(1, 0, 42), (1, 1, 69)]); }); } @@ -126,10 +149,11 @@ fn basic_minting_should_work() { fn lifecycle_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); assert_ok!(Nfts::create( RuntimeOrigin::signed(1), 1, - CollectionConfig::all_settings_enabled() + collection_config_with_all_settings_enabled() )); assert_eq!(Balances::reserved_balance(&1), 2); assert_eq!(collections(), vec![(1, 0)]); @@ -137,14 +161,19 @@ fn lifecycle_should_work() { assert_eq!(Balances::reserved_balance(&1), 5); assert!(CollectionMetadataOf::::contains_key(0)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 10, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 10, default_item_config())); assert_eq!(Balances::reserved_balance(&1), 6); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 69, 20, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 69, 20, default_item_config())); assert_eq!(Balances::reserved_balance(&1), 7); - assert_eq!(items(), vec![(10, 0, 42), (20, 0, 69)]); - assert_eq!(Collection::::get(0).unwrap().items, 2); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 70, None)); + assert_eq!(items(), vec![(1, 0, 70), 
(10, 0, 42), (20, 0, 69)]); + assert_eq!(Collection::::get(0).unwrap().items, 3); assert_eq!(Collection::::get(0).unwrap().item_metadatas, 0); + assert_eq!(Balances::reserved_balance(&2), 0); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 70, 2)); + assert_eq!(Balances::reserved_balance(&2), 1); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![42, 42])); assert_eq!(Balances::reserved_balance(&1), 10); assert!(ItemMetadataOf::::contains_key(0, 42)); @@ -153,7 +182,7 @@ fn lifecycle_should_work() { assert!(ItemMetadataOf::::contains_key(0, 69)); let w = Nfts::get_destroy_witness(&0).unwrap(); - assert_eq!(w.items, 2); + assert_eq!(w.items, 3); assert_eq!(w.item_metadatas, 2); assert_ok!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w)); assert_eq!(Balances::reserved_balance(&1), 0); @@ -177,11 +206,11 @@ fn destroy_with_bad_witness_should_not_work() { assert_ok!(Nfts::create( RuntimeOrigin::signed(1), 1, - CollectionConfig::all_settings_enabled() + collection_config_with_all_settings_enabled() )); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); assert_noop!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w), Error::::BadWitness); }); } @@ -190,10 +219,55 @@ fn destroy_with_bad_witness_should_not_work() { fn mint_should_work() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); assert_eq!(Nfts::owner(0, 42).unwrap(), 1); assert_eq!(collections(), vec![(1, 0)]); assert_eq!(items(), vec![(1, 0, 42)]); + + // validate minting start and end settings + assert_ok!(Nfts::update_mint_settings( + RuntimeOrigin::signed(1), + 0, + MintSettings { start_block: Some(2), end_block: 
Some(3), ..Default::default() } + )); + + System::set_block_number(1); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(1), 0, 43, None), + Error::::MintNotStated + ); + System::set_block_number(4); + assert_noop!(Nfts::mint(RuntimeOrigin::signed(1), 0, 43, None), Error::::MintEnded); + + // validate price + assert_ok!(Nfts::update_mint_settings( + RuntimeOrigin::signed(1), + 0, + MintSettings { mint_type: MintType::Public, price: Some(1), ..Default::default() } + )); + Balances::make_free_balance_be(&2, 100); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 43, None)); + assert_eq!(Balances::total_balance(&2), 99); + + // validate types + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::update_mint_settings( + RuntimeOrigin::signed(1), + 1, + MintSettings { mint_type: MintType::HolderOf(0), ..Default::default() } + )); + assert_noop!(Nfts::mint(RuntimeOrigin::signed(3), 1, 42, None), Error::::BadWitness); + assert_noop!(Nfts::mint(RuntimeOrigin::signed(2), 1, 42, None), Error::::BadWitness); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(2), 1, 42, Some(MintWitness { owner_of_item: 42 })), + Error::::BadWitness + ); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(2), + 1, + 42, + Some(MintWitness { owner_of_item: 43 }) + )); }); } @@ -201,7 +275,7 @@ fn mint_should_work() { fn transfer_should_work() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); assert_ok!(Nfts::transfer(RuntimeOrigin::signed(2), 0, 42, 3)); assert_eq!(items(), vec![(3, 0, 42)]); @@ -218,12 +292,12 @@ fn transfer_should_work() { assert_ok!(Nfts::force_create( RuntimeOrigin::root(), 1, - CollectionConfig::disable_settings( + collection_config_from_disabled_settings( 
CollectionSetting::TransferableItems | CollectionSetting::DepositRequired ) )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 1, 1, 42, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 1, 1, 42, default_item_config())); assert_noop!( Nfts::transfer(RuntimeOrigin::signed(1), collection_id, 42, 3,), @@ -236,7 +310,7 @@ fn transfer_should_work() { fn locking_transfer_should_work() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); assert_ok!(Nfts::lock_item_transfer(RuntimeOrigin::signed(1), 0, 42)); assert_noop!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::ItemLocked); @@ -244,21 +318,17 @@ fn locking_transfer_should_work() { assert_ok!(Nfts::lock_collection( RuntimeOrigin::signed(1), 0, - CollectionConfig::disable_settings(CollectionSetting::TransferableItems.into()) + CollectionSettings::from_disabled(CollectionSetting::TransferableItems.into()) )); assert_noop!( Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::ItemsNonTransferable ); - assert_ok!(Nfts::force_collection_status( + assert_ok!(Nfts::force_collection_config( RuntimeOrigin::root(), 0, - 1, - 1, - 1, - 1, - CollectionConfig::all_settings_enabled(), + collection_config_with_all_settings_enabled(), )); assert_ok!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2)); }); @@ -268,7 +338,7 @@ fn locking_transfer_should_work() { fn origin_guards_should_work() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); Balances::make_free_balance_be(&2, 100); assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(2), 
Some(0))); @@ -289,7 +359,7 @@ fn origin_guards_should_work() { Error::::NoPermission ); assert_noop!( - Nfts::mint(RuntimeOrigin::signed(2), 0, 69, 2, default_item_config()), + Nfts::mint(RuntimeOrigin::signed(2), 0, 69, None), Error::::NoPermission ); assert_noop!( @@ -310,7 +380,7 @@ fn transfer_owner_should_work() { assert_ok!(Nfts::create( RuntimeOrigin::signed(1), 1, - CollectionConfig::all_settings_enabled() + collection_config_with_all_settings_enabled() )); assert_eq!(collections(), vec![(1, 0)]); assert_noop!( @@ -334,17 +404,22 @@ fn transfer_owner_should_work() { // Mint and set metadata now and make sure that deposit gets transferred back. assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(2), 0, bvec![0u8; 20])); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); + assert_eq!(Balances::reserved_balance(&1), 1); assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20])); assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(3), Some(0))); assert_ok!(Nfts::transfer_ownership(RuntimeOrigin::signed(2), 0, 3)); assert_eq!(collections(), vec![(3, 0)]); - assert_eq!(Balances::total_balance(&2), 57); - assert_eq!(Balances::total_balance(&3), 145); + assert_eq!(Balances::total_balance(&2), 58); + assert_eq!(Balances::total_balance(&3), 144); assert_eq!(Balances::reserved_balance(&2), 0); - assert_eq!(Balances::reserved_balance(&3), 45); + assert_eq!(Balances::reserved_balance(&3), 44); + + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(Balances::reserved_balance(&2), 1); - // 2's acceptence from before is reset when it became owner, so it cannot be transfered + // 2's acceptance from before is reset when it became an owner, so it cannot be transferred // without a fresh acceptance. 
assert_noop!( Nfts::transfer_ownership(RuntimeOrigin::signed(3), 0, 2), @@ -359,7 +434,7 @@ fn set_team_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); assert_ok!(Nfts::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 42, 2, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 42, None)); assert_ok!(Nfts::lock_item_transfer(RuntimeOrigin::signed(4), 0, 42)); assert_ok!(Nfts::unlock_item_transfer(RuntimeOrigin::signed(4), 0, 42)); assert_ok!(Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 3)); @@ -373,12 +448,12 @@ fn set_collection_metadata_should_work() { // Cannot add metadata to unknown item assert_noop!( Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 20]), - Error::::UnknownCollection, + Error::::NoConfig, ); assert_ok!(Nfts::force_create( RuntimeOrigin::root(), 1, - CollectionConfig::all_settings_enabled() + collection_config_with_all_settings_enabled() )); // Cannot add metadata to unowned item assert_noop!( @@ -412,7 +487,7 @@ fn set_collection_metadata_should_work() { assert_ok!(Nfts::lock_collection( RuntimeOrigin::signed(1), 0, - CollectionConfig::disable_settings(CollectionSetting::UnlockedMetadata.into()) + CollectionSettings::from_disabled(CollectionSetting::UnlockedMetadata.into()) )); assert_noop!( Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 15]), @@ -451,9 +526,9 @@ fn set_item_metadata_should_work() { assert_ok!(Nfts::force_create( RuntimeOrigin::root(), 1, - CollectionConfig::all_settings_enabled() + collection_config_with_all_settings_enabled() )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); // Cannot add metadata to unowned item assert_noop!( Nfts::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20]), @@ -515,9 +590,9 @@ fn set_attribute_should_work() { 
assert_ok!(Nfts::force_create( RuntimeOrigin::root(), 1, - CollectionConfig::all_settings_enabled() + collection_config_with_all_settings_enabled() )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, None)); assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0])); assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![0])); @@ -565,10 +640,10 @@ fn set_attribute_should_respect_lock() { assert_ok!(Nfts::force_create( RuntimeOrigin::root(), 1, - CollectionConfig::all_settings_enabled() + collection_config_with_all_settings_enabled() )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, default_item_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, None)); assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0])); assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![0])); @@ -587,7 +662,7 @@ fn set_attribute_should_respect_lock() { assert_ok!(Nfts::lock_collection( RuntimeOrigin::signed(1), 0, - CollectionConfig::disable_settings(CollectionSetting::UnlockedAttributes.into()) + CollectionSettings::from_disabled(CollectionSetting::UnlockedAttributes.into()) )); let e = Error::::LockedCollectionAttributes; @@ -612,10 +687,10 @@ fn preserve_config_for_frozen_items() { assert_ok!(Nfts::force_create( RuntimeOrigin::root(), 1, - CollectionConfig::all_settings_enabled() + collection_config_with_all_settings_enabled() )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, default_item_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, None)); + 
assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, None)); // if the item is not locked/frozen then the config gets deleted on item burn assert_ok!(Nfts::burn(RuntimeOrigin::signed(1), 0, 1, Some(1))); @@ -624,8 +699,9 @@ fn preserve_config_for_frozen_items() { // lock the item and ensure the config stays unchanged assert_ok!(Nfts::lock_item_properties(RuntimeOrigin::signed(1), 0, 0, true, true)); - let expect_config = - ItemConfig(ItemSetting::UnlockedAttributes | ItemSetting::UnlockedMetadata); + let expect_config = item_config_from_disabled_settings( + ItemSetting::UnlockedAttributes | ItemSetting::UnlockedMetadata, + ); let config = ItemConfigOf::::get(0, 0).unwrap(); assert_eq!(config, expect_config); @@ -635,69 +711,72 @@ fn preserve_config_for_frozen_items() { // can't mint with the different config assert_noop!( - Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, default_item_config()), + Nfts::force_mint(RuntimeOrigin::signed(1), 0, 0, 1, default_item_config()), Error::::InconsistentItemConfig ); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, expect_config)); + assert_ok!(Nfts::update_mint_settings( + RuntimeOrigin::signed(1), + 0, + MintSettings { + default_item_settings: ItemSettings::from_disabled( + ItemSetting::UnlockedAttributes | ItemSetting::UnlockedMetadata + ), + ..Default::default() + } + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, None)); }); } #[test] -fn force_collection_status_should_work() { +fn force_update_collection_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); assert_ok!(Nfts::force_create( RuntimeOrigin::root(), 1, - CollectionConfig::all_settings_enabled() + collection_config_with_all_settings_enabled() )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, default_item_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 69, 2, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); + 
assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 69, 2, default_item_config())); assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0; 20])); assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20])); assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 20])); assert_eq!(Balances::reserved_balance(1), 65); // force item status to be free holding - assert_ok!(Nfts::force_collection_status( + assert_ok!(Nfts::force_collection_config( RuntimeOrigin::root(), 0, - 1, - 1, - 1, - 1, - CollectionConfig::disable_settings(CollectionSetting::DepositRequired.into()), + collection_config_from_disabled_settings(CollectionSetting::DepositRequired.into()), )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 142, 1, default_item_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 169, 2, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 142, None)); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 169, 2, default_item_config())); assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 142, bvec![0; 20])); assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 169, bvec![0; 20])); - assert_eq!(Balances::reserved_balance(1), 65); - assert_ok!(Nfts::redeposit(RuntimeOrigin::signed(1), 0, bvec![0, 42, 50, 69, 100])); - assert_eq!(Balances::reserved_balance(1), 63); + Balances::make_free_balance_be(&5, 100); + assert_ok!(Nfts::force_collection_owner(RuntimeOrigin::root(), 0, 5)); + assert_eq!(collections(), vec![(5, 0)]); + assert_eq!(Balances::reserved_balance(1), 2); + assert_eq!(Balances::reserved_balance(5), 63); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20])); - assert_eq!(Balances::reserved_balance(1), 42); + assert_ok!(Nfts::redeposit(RuntimeOrigin::signed(5), 0, bvec![0, 42, 50, 69, 100])); + assert_eq!(Balances::reserved_balance(1), 0); - assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 
20])); - assert_eq!(Balances::reserved_balance(1), 21); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(5), 0, 42, bvec![0; 20])); + assert_eq!(Balances::reserved_balance(5), 42); - assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0; 20])); - assert_eq!(Balances::reserved_balance(1), 0); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(5), 0, 69, bvec![0; 20])); + assert_eq!(Balances::reserved_balance(5), 21); + + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(5), 0, bvec![0; 20])); + assert_eq!(Balances::reserved_balance(5), 0); // validate new roles - assert_ok!(Nfts::force_collection_status( - RuntimeOrigin::root(), - 0, - 1, - 2, - 3, - 4, - CollectionConfig::all_settings_enabled(), - )); + assert_ok!(Nfts::set_team(RuntimeOrigin::root(), 0, 2, 3, 4)); assert_eq!( CollectionRoleOf::::get(0, 2).unwrap(), CollectionRoles(CollectionRole::Issuer.into()) @@ -711,15 +790,7 @@ fn force_collection_status_should_work() { CollectionRoles(CollectionRole::Freezer.into()) ); - assert_ok!(Nfts::force_collection_status( - RuntimeOrigin::root(), - 0, - 1, - 3, - 2, - 3, - CollectionConfig::all_settings_enabled(), - )); + assert_ok!(Nfts::set_team(RuntimeOrigin::root(), 0, 3, 2, 3)); assert_eq!( CollectionRoleOf::::get(0, 2).unwrap(), @@ -739,7 +810,7 @@ fn burn_works() { assert_ok!(Nfts::force_create( RuntimeOrigin::root(), 1, - CollectionConfig::all_settings_enabled() + collection_config_with_all_settings_enabled() )); assert_ok!(Nfts::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); @@ -748,8 +819,8 @@ fn burn_works() { Error::::UnknownCollection ); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 42, 5, default_item_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 69, 5, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(2), 0, 42, 5, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(2), 0, 69, 5, default_item_config())); 
assert_eq!(Balances::reserved_balance(1), 2); assert_noop!( @@ -771,7 +842,7 @@ fn burn_works() { fn approval_lifecycle_works() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_ok!(Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 4)); assert_noop!( @@ -788,18 +859,12 @@ fn approval_lifecycle_works() { assert_ok!(Nfts::force_create( RuntimeOrigin::root(), 1, - CollectionConfig::disable_settings( + collection_config_from_disabled_settings( CollectionSetting::TransferableItems | CollectionSetting::DepositRequired ) )); - assert_ok!(Nfts::mint( - RuntimeOrigin::signed(1), - 1, - collection_id, - 1, - default_item_config() - )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 1, collection_id, None)); assert_noop!( Nfts::approve_transfer(RuntimeOrigin::signed(1), collection_id, 1, 2, None), @@ -812,7 +877,7 @@ fn approval_lifecycle_works() { fn cancel_approval_works() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_noop!( @@ -840,7 +905,7 @@ fn cancel_approval_works() { let current_block = 1; System::set_block_number(current_block); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 69, 2, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 69, 2, default_item_config())); // approval expires after 2 blocks. 
assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, Some(2))); assert_noop!( @@ -859,7 +924,7 @@ fn cancel_approval_works() { fn approving_multiple_accounts_works() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); let current_block = 1; System::set_block_number(current_block); @@ -884,7 +949,7 @@ fn approving_multiple_accounts_works() { fn approvals_limit_works() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); for i in 3..13 { assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, i, None)); @@ -906,9 +971,9 @@ fn approval_deadline_works() { assert_ok!(Nfts::force_create( RuntimeOrigin::root(), 1, - CollectionConfig::disable_settings(CollectionSetting::DepositRequired.into()) + collection_config_from_disabled_settings(CollectionSetting::DepositRequired.into()) )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); // the approval expires after the 2nd block. 
assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, Some(2))); @@ -935,7 +1000,7 @@ fn approval_deadline_works() { fn cancel_approval_works_with_admin() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_noop!( @@ -963,7 +1028,7 @@ fn cancel_approval_works_with_admin() { fn cancel_approval_works_with_force() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_noop!( @@ -991,7 +1056,7 @@ fn cancel_approval_works_with_force() { fn clear_all_transfer_approvals_works() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 4, None)); @@ -1026,66 +1091,96 @@ fn max_supply_should_work() { new_test_ext().execute_with(|| { let collection_id = 0; let user_id = 1; - let max_supply = 2; + let max_supply = 1; // validate set_collection_max_supply assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); - assert!(!CollectionMaxSupply::::contains_key(collection_id)); + 
assert_eq!(CollectionConfigOf::::get(collection_id).unwrap().max_supply, None); assert_ok!(Nfts::set_collection_max_supply( RuntimeOrigin::signed(user_id), collection_id, max_supply )); - assert_eq!(CollectionMaxSupply::::get(collection_id).unwrap(), max_supply); + assert_eq!( + CollectionConfigOf::::get(collection_id).unwrap().max_supply, + Some(max_supply) + ); assert!(events().contains(&Event::::CollectionMaxSupplySet { collection: collection_id, max_supply, })); - assert_noop!( - Nfts::set_collection_max_supply( - RuntimeOrigin::signed(user_id), - collection_id, - max_supply + 1 - ), - Error::::MaxSupplyAlreadySet - ); - - // validate we can't mint more to max supply - assert_ok!(Nfts::mint( + assert_ok!(Nfts::set_collection_max_supply( RuntimeOrigin::signed(user_id), collection_id, - 0, - user_id, - default_item_config() + max_supply + 1 )); - assert_ok!(Nfts::mint( + assert_ok!(Nfts::lock_collection( RuntimeOrigin::signed(user_id), collection_id, - 1, - user_id, - default_item_config() + CollectionSettings::from_disabled(CollectionSetting::UnlockedMaxSupply.into()) )); assert_noop!( - Nfts::mint( + Nfts::set_collection_max_supply( RuntimeOrigin::signed(user_id), collection_id, - 2, - user_id, - default_item_config() + max_supply + 2 ), + Error::::MaxSupplyLocked + ); + + // validate we can't mint more to max supply + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 0, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 1, None)); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 2, None), Error::::MaxSupplyReached ); + }); +} + +#[test] +fn mint_settings_should_work() { + new_test_ext().execute_with(|| { + let collection_id = 0; + let user_id = 1; + let item_id = 0; + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_id, None)); + assert_eq!( + 
ItemConfigOf::::get(collection_id, item_id) + .unwrap() + .settings + .get_disabled(), + ItemSettings::all_enabled().get_disabled() + ); - // validate we remove the CollectionMaxSupply record when we destroy the collection - assert_ok!(Nfts::destroy( - RuntimeOrigin::signed(user_id), - collection_id, - Nfts::get_destroy_witness(&collection_id).unwrap() + let collection_id = 1; + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + user_id, + CollectionConfig { + mint_settings: MintSettings { + default_item_settings: ItemSettings::from_disabled( + ItemSetting::Transferable | ItemSetting::UnlockedMetadata + ), + ..Default::default() + }, + ..default_collection_config() + } )); - assert!(!CollectionMaxSupply::::contains_key(collection_id)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_id, None)); + assert_eq!( + ItemConfigOf::::get(collection_id, item_id) + .unwrap() + .settings + .get_disabled(), + ItemSettings::from_disabled(ItemSetting::Transferable | ItemSetting::UnlockedMetadata) + .get_disabled() + ); }); } @@ -1099,20 +1194,8 @@ fn set_price_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); - assert_ok!(Nfts::mint( - RuntimeOrigin::signed(user_id), - collection_id, - item_1, - user_id, - default_item_config() - )); - assert_ok!(Nfts::mint( - RuntimeOrigin::signed(user_id), - collection_id, - item_2, - user_id, - default_item_config() - )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_2, None)); assert_ok!(Nfts::set_price( RuntimeOrigin::signed(user_id), @@ -1164,18 +1247,12 @@ fn set_price_should_work() { assert_ok!(Nfts::force_create( RuntimeOrigin::root(), user_id, - CollectionConfig::disable_settings( + collection_config_from_disabled_settings( CollectionSetting::TransferableItems | CollectionSetting::DepositRequired ) )); - assert_ok!(Nfts::mint( - 
RuntimeOrigin::signed(user_id), - collection_id, - item_1, - user_id, - default_item_config() - )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_1, None)); assert_noop!( Nfts::set_price(RuntimeOrigin::signed(user_id), collection_id, item_1, Some(2), None), @@ -1204,27 +1281,9 @@ fn buy_item_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, default_collection_config())); - assert_ok!(Nfts::mint( - RuntimeOrigin::signed(user_1), - collection_id, - item_1, - user_1, - default_item_config(), - )); - assert_ok!(Nfts::mint( - RuntimeOrigin::signed(user_1), - collection_id, - item_2, - user_1, - default_item_config(), - )); - assert_ok!(Nfts::mint( - RuntimeOrigin::signed(user_1), - collection_id, - item_3, - user_1, - default_item_config(), - )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_2, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_3, None)); assert_ok!(Nfts::set_price( RuntimeOrigin::signed(user_1), @@ -1308,7 +1367,7 @@ fn buy_item_should_work() { assert_ok!(Nfts::lock_collection( RuntimeOrigin::signed(user_1), collection_id, - CollectionConfig::disable_settings(CollectionSetting::TransferableItems.into()) + CollectionSettings::from_disabled(CollectionSetting::TransferableItems.into()) )); let buy_item_call = mock::RuntimeCall::Nfts(crate::Call::::buy_item { @@ -1322,21 +1381,17 @@ fn buy_item_should_work() { ); // unlock the collection - assert_ok!(Nfts::force_collection_status( + assert_ok!(Nfts::force_collection_config( RuntimeOrigin::root(), collection_id, - user_1, - user_1, - user_1, - user_1, - CollectionConfig::all_settings_enabled(), + collection_config_with_all_settings_enabled(), )); // lock the transfer assert_ok!(Nfts::lock_item_transfer( RuntimeOrigin::signed(user_1), collection_id, - item_3 + item_3, )); let buy_item_call = 
mock::RuntimeCall::Nfts(crate::Call::::buy_item { @@ -1413,20 +1468,8 @@ fn create_cancel_swap_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); - assert_ok!(Nfts::mint( - RuntimeOrigin::signed(user_id), - collection_id, - item_1, - user_id, - default_item_config(), - )); - assert_ok!(Nfts::mint( - RuntimeOrigin::signed(user_id), - collection_id, - item_2, - user_id, - default_item_config(), - )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_1, None,)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_2, None,)); // validate desired item and the collection exists assert_noop!( @@ -1567,17 +1610,16 @@ fn claim_swap_should_work() { RuntimeOrigin::signed(user_1), collection_id, item_1, - user_1, - default_item_config(), + None, )); - assert_ok!(Nfts::mint( + assert_ok!(Nfts::force_mint( RuntimeOrigin::signed(user_1), collection_id, item_2, user_2, default_item_config(), )); - assert_ok!(Nfts::mint( + assert_ok!(Nfts::force_mint( RuntimeOrigin::signed(user_1), collection_id, item_3, @@ -1588,10 +1630,9 @@ fn claim_swap_should_work() { RuntimeOrigin::signed(user_1), collection_id, item_4, - user_1, - default_item_config(), + None, )); - assert_ok!(Nfts::mint( + assert_ok!(Nfts::force_mint( RuntimeOrigin::signed(user_1), collection_id, item_5, @@ -1753,7 +1794,7 @@ fn various_collection_settings() { new_test_ext().execute_with(|| { // when we set only one value it's required to call .into() on it let config = - CollectionConfig::disable_settings(CollectionSetting::TransferableItems.into()); + collection_config_from_disabled_settings(CollectionSetting::TransferableItems.into()); assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, config)); let config = CollectionConfigOf::::get(0).unwrap(); @@ -1761,7 +1802,7 @@ fn various_collection_settings() { assert!(config.is_setting_enabled(CollectionSetting::UnlockedMetadata)); // no need to call .into() for 
multiple values - let config = CollectionConfig::disable_settings( + let config = collection_config_from_disabled_settings( CollectionSetting::UnlockedMetadata | CollectionSetting::TransferableItems, ); assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, config)); @@ -1783,35 +1824,46 @@ fn collection_locking_should_work() { assert_ok!(Nfts::force_create( RuntimeOrigin::root(), user_id, - CollectionConfig::all_settings_enabled() + collection_config_with_all_settings_enabled() )); + let lock_config = + collection_config_from_disabled_settings(CollectionSetting::DepositRequired.into()); + assert_noop!( + Nfts::lock_collection( + RuntimeOrigin::signed(user_id), + collection_id, + lock_config.settings, + ), + Error::::WrongSetting + ); + // validate partial lock - let lock_config = CollectionConfig::disable_settings( + let lock_config = collection_config_from_disabled_settings( CollectionSetting::TransferableItems | CollectionSetting::UnlockedAttributes, ); assert_ok!(Nfts::lock_collection( RuntimeOrigin::signed(user_id), collection_id, - lock_config, + lock_config.settings, )); let stored_config = CollectionConfigOf::::get(collection_id).unwrap(); assert_eq!(stored_config, lock_config); // validate full lock - let full_lock_config = CollectionConfig::disable_settings( - CollectionSetting::TransferableItems | - CollectionSetting::UnlockedMetadata | - CollectionSetting::UnlockedAttributes, - ); assert_ok!(Nfts::lock_collection( RuntimeOrigin::signed(user_id), collection_id, - CollectionConfig::disable_settings(CollectionSetting::UnlockedMetadata.into()), + CollectionSettings::from_disabled(CollectionSetting::UnlockedMetadata.into()), )); let stored_config = CollectionConfigOf::::get(collection_id).unwrap(); + let full_lock_config = collection_config_from_disabled_settings( + CollectionSetting::TransferableItems | + CollectionSetting::UnlockedMetadata | + CollectionSetting::UnlockedAttributes, + ); assert_eq!(stored_config, full_lock_config); }); } @@ -1819,7 +1871,7 
@@ fn collection_locking_should_work() { #[test] fn pallet_level_feature_flags_should_work() { new_test_ext().execute_with(|| { - Features::set(&PalletFeatures::disable( + Features::set(&PalletFeatures::from_disabled( PalletFeature::Trading | PalletFeature::Approvals | PalletFeature::Attributes, )); @@ -1829,13 +1881,7 @@ fn pallet_level_feature_flags_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); - assert_ok!(Nfts::mint( - RuntimeOrigin::signed(user_id), - collection_id, - item_id, - user_id, - default_item_config(), - )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_id, None,)); // PalletFeature::Trading assert_noop!( diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index 0122a817229ac..d57f62be97f39 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -36,8 +36,10 @@ pub(super) type ApprovalsOf = BoundedBTreeMap< Option<::BlockNumber>, >::ApprovalsLimit, >; +pub(super) type ItemDepositOf = + ItemDeposit, ::AccountId>; pub(super) type ItemDetailsFor = - ItemDetails<::AccountId, DepositBalanceOf, ApprovalsOf>; + ItemDetails<::AccountId, ItemDepositOf, ApprovalsOf>; pub(super) type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; pub(super) type ItemPrice = BalanceOf; @@ -47,6 +49,11 @@ pub(super) type ItemTipOf = ItemTip< ::AccountId, BalanceOf, >; +pub(super) type CollectionConfigFor = CollectionConfig< + BalanceOf, + ::BlockNumber, + >::CollectionId, +>; pub trait Incrementable { fn increment(&self) -> Self; @@ -95,14 +102,23 @@ impl CollectionDetails { /// Information concerning the ownership of a single unique item. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] -pub struct ItemDetails { +pub struct ItemDetails { /// The owner of this item. pub(super) owner: AccountId, /// The approved transferrer of this item, if one is set. 
pub(super) approvals: Approvals, /// The amount held in the pallet's default account for this item. Free-hold items will have /// this as zero. - pub(super) deposit: DepositBalance, + pub(super) deposit: Deposit, +} + +/// Information about the reserved item deposit. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct ItemDeposit { + /// A depositor account. + pub(super) account: AccountId, + /// An amount that gets reserved. + pub(super) amount: DepositBalance, } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] @@ -182,39 +198,104 @@ pub enum CollectionSetting { UnlockedMetadata, /// Attributes of this collection can be modified. UnlockedAttributes, + /// The supply of this collection can be modified. + UnlockedMaxSupply, /// When this isn't set then the deposit is required to hold the items of this collection. DepositRequired, } -pub(super) type CollectionSettings = BitFlags; -/// Wrapper type for `CollectionSettings` that implements `Codec`. +/// Wrapper type for `BitFlags` that implements `Codec`. #[derive(Clone, Copy, PartialEq, Eq, Default, RuntimeDebug)] -pub struct CollectionConfig(pub CollectionSettings); +pub struct CollectionSettings(pub BitFlags); -impl CollectionConfig { - pub fn all_settings_enabled() -> Self { +impl CollectionSettings { + pub fn all_enabled() -> Self { Self(BitFlags::EMPTY) } - pub fn get_disabled_settings(&self) -> CollectionSettings { + pub fn get_disabled(&self) -> BitFlags { self.0 } + pub fn is_disabled(&self, setting: CollectionSetting) -> bool { + self.0.contains(setting) + } + pub fn from_disabled(settings: BitFlags) -> Self { + Self(settings) + } +} + +impl_codec_bitflags!(CollectionSettings, u64, CollectionSetting); + +/// Mint type. Can the NFT be created by anyone, or only the creator of the collection, +/// or only by wallets that already hold an NFT from a certain collection?
+/// The ownership of a privately minted NFT is still publicly visible. +#[derive(Clone, Copy, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub enum MintType { + /// Only an `Issuer` can mint items. + Issuer, + /// Anyone can mint items. + Public, + /// Only holders of items in a specified collection can mint new items. + HolderOf(CollectionId), +} + +#[derive(Clone, Copy, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct MintSettings { + /// Whether anyone can mint or if minters are restricted to some subset. + pub(super) mint_type: MintType, + /// An optional price per mint. + pub(super) price: Option, + /// When the mint starts. + pub(super) start_block: Option, + /// When the mint ends. + pub(super) end_block: Option, + /// Default settings each item will get during the mint. + pub(super) default_item_settings: ItemSettings, +} + +impl Default for MintSettings { + fn default() -> Self { + Self { + mint_type: MintType::Issuer, + price: None, + start_block: None, + end_block: None, + default_item_settings: ItemSettings::all_enabled(), + } + } +} + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub struct MintWitness { + /// Provide the id of the item in a required collection. + pub owner_of_item: ItemId, +} + +#[derive( + Clone, Copy, Decode, Default, Encode, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo, +)] +pub struct CollectionConfig { + /// Collection's settings. + pub(super) settings: CollectionSettings, + /// Collection's max supply. + pub(super) max_supply: Option, + /// Default settings each item will get during the mint.
+ pub(super) mint_settings: MintSettings, +} + +impl CollectionConfig { pub fn is_setting_enabled(&self, setting: CollectionSetting) -> bool { - !self.get_disabled_settings().contains(setting) + !self.settings.is_disabled(setting) } pub fn has_disabled_setting(&self, setting: CollectionSetting) -> bool { - self.get_disabled_settings().contains(setting) - } - pub fn disable_settings(settings: CollectionSettings) -> Self { - Self(settings) + self.settings.is_disabled(setting) } pub fn enable_setting(&mut self, setting: CollectionSetting) { - self.0.remove(setting); + self.settings.0.remove(setting); } pub fn disable_setting(&mut self, setting: CollectionSetting) { - self.0.insert(setting); + self.settings.0.insert(setting); } } -impl_codec_bitflags!(CollectionConfig, u64, CollectionSetting); /// Support for up to 64 user-enabled features on an item. #[bitflags] @@ -228,39 +309,53 @@ pub enum ItemSetting { /// Attributes of this item can be modified. UnlockedAttributes, } -pub(super) type ItemSettings = BitFlags; -/// Wrapper type for `ItemSettings` that implements `Codec`. +/// Wrapper type for `BitFlags` that implements `Codec`. #[derive(Clone, Copy, PartialEq, Eq, Default, RuntimeDebug)] -pub struct ItemConfig(pub ItemSettings); +pub struct ItemSettings(pub BitFlags); -impl ItemConfig { - pub fn all_settings_enabled() -> Self { +impl ItemSettings { + pub fn all_enabled() -> Self { Self(BitFlags::EMPTY) } - pub fn get_disabled_settings(&self) -> ItemSettings { + pub fn get_disabled(&self) -> BitFlags { self.0 } + pub fn is_disabled(&self, setting: ItemSetting) -> bool { + self.0.contains(setting) + } + pub fn from_disabled(settings: BitFlags) -> Self { + Self(settings) + } +} + +impl_codec_bitflags!(ItemSettings, u64, ItemSetting); + +#[derive( + Encode, Decode, Default, PartialEq, RuntimeDebug, Clone, Copy, MaxEncodedLen, TypeInfo, +)] +pub struct ItemConfig { + /// Item's settings. 
+ pub(super) settings: ItemSettings, +} + +impl ItemConfig { pub fn is_setting_enabled(&self, setting: ItemSetting) -> bool { - !self.get_disabled_settings().contains(setting) + !self.settings.is_disabled(setting) } pub fn has_disabled_setting(&self, setting: ItemSetting) -> bool { - self.get_disabled_settings().contains(setting) + self.settings.is_disabled(setting) } pub fn has_disabled_settings(&self) -> bool { - !self.get_disabled_settings().is_empty() - } - pub fn disable_settings(settings: ItemSettings) -> Self { - Self(settings) + !self.settings.get_disabled().is_empty() } pub fn enable_setting(&mut self, setting: ItemSetting) { - self.0.remove(setting); + self.settings.0.remove(setting); } pub fn disable_setting(&mut self, setting: ItemSetting) { - self.0.insert(setting); + self.settings.0.insert(setting); } } -impl_codec_bitflags!(ItemConfig, u64, ItemSetting); /// Support for up to 64 system-enabled features on a collection. #[bitflags] @@ -275,8 +370,6 @@ pub enum PalletFeature { Approvals, /// Allow/disallow atomic items swap. Swaps, - /// Allow/disallow public mints. - PublicMints, } /// Wrapper type for `BitFlags` that implements `Codec`. 
@@ -287,7 +380,7 @@ impl PalletFeatures { pub fn all_enabled() -> Self { Self(BitFlags::EMPTY) } - pub fn disable(features: BitFlags) -> Self { + pub fn from_disabled(features: BitFlags) -> Self { Self(features) } pub fn is_enabled(&self, feature: PalletFeature) -> bool { diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index 5f6ee43a09ffe..f254726ca19f2 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -50,6 +50,7 @@ pub trait WeightInfo { fn force_create() -> Weight; fn destroy(n: u32, m: u32, a: u32, ) -> Weight; fn mint() -> Weight; + fn force_mint() -> Weight; fn burn() -> Weight; fn transfer() -> Weight; fn redeposit(i: u32, ) -> Weight; @@ -58,7 +59,8 @@ pub trait WeightInfo { fn lock_collection() -> Weight; fn transfer_ownership() -> Weight; fn set_team() -> Weight; - fn force_collection_status() -> Weight; + fn force_collection_owner() -> Weight; + fn force_collection_config() -> Weight; fn lock_item_properties() -> Weight; fn set_attribute() -> Weight; fn clear_attribute() -> Weight; @@ -71,6 +73,7 @@ pub trait WeightInfo { fn clear_all_transfer_approvals() -> Weight; fn set_accept_ownership() -> Weight; fn set_collection_max_supply() -> Weight; + fn update_mint_settings() -> Weight; fn set_price() -> Weight; fn buy_item() -> Weight; fn pay_tips(n: u32, ) -> Weight; @@ -137,6 +140,17 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } + // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts CollectionMaxSupply (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:0 w:1) + // Storage: Nfts Account (r:0 w:1) + fn force_mint() -> Weight { + Weight::from_ref_time(47_947_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) + } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts Asset (r:1 
w:1) // Storage: Nfts ItemConfigOf (r:0 w:1) @@ -211,7 +225,15 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) // Storage: Nfts CollectionConfigOf (r:0 w:1) - fn force_collection_status() -> Weight { + fn force_collection_owner() -> Weight { + Weight::from_ref_time(28_468_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) + } + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts CollectionConfigOf (r:0 w:1) + fn force_collection_config() -> Weight { Weight::from_ref_time(28_468_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -309,6 +331,13 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } + // Storage: Nfts CollectionMaxSupply (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) + fn update_mint_settings() -> Weight { + Weight::from_ref_time(26_358_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + } // Storage: Nfts Asset (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:0) @@ -419,6 +448,17 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } + // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts CollectionMaxSupply (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:0 w:1) + // Storage: Nfts Account (r:0 w:1) + fn force_mint() -> Weight { + Weight::from_ref_time(47_947_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) + } // Storage: Nfts Class (r:1 w:1) // Storage: Nfts Asset (r:1 
w:1) // Storage: Nfts ItemConfigOf (r:0 w:1) @@ -493,7 +533,15 @@ impl WeightInfo for () { // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ClassAccount (r:0 w:1) // Storage: Nfts CollectionConfigOf (r:0 w:1) - fn force_collection_status() -> Weight { + fn force_collection_owner() -> Weight { + Weight::from_ref_time(28_468_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) + } + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts CollectionConfigOf (r:0 w:1) + fn force_collection_config() -> Weight { Weight::from_ref_time(28_468_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -591,6 +639,13 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } + // Storage: Nfts CollectionMaxSupply (r:1 w:1) + // Storage: Nfts Class (r:1 w:0) + fn update_mint_settings() -> Weight { + Weight::from_ref_time(26_358_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + } // Storage: Nfts Asset (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:0) diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs index 850195852cf72..4f610d9b80a05 100644 --- a/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -78,7 +78,12 @@ pub trait Mutate: Inspect { /// Mint some `item` to be owned by `who`. /// /// By default, this is not a supported operation. 
- fn mint_into(_item: &Self::ItemId, _who: &AccountId, _config: &ItemConfig) -> DispatchResult { + fn mint_into( + _item: &Self::ItemId, + _who: &AccountId, + _config: &ItemConfig, + _deposit_collection_owner: bool, + ) -> DispatchResult { Err(TokenError::Unsupported.into()) } @@ -164,8 +169,19 @@ impl< ItemConfig, > Mutate for ItemOf { - fn mint_into(item: &Self::ItemId, who: &AccountId, config: &ItemConfig) -> DispatchResult { - >::mint_into(&A::get(), item, who, config) + fn mint_into( + item: &Self::ItemId, + who: &AccountId, + config: &ItemConfig, + deposit_collection_owner: bool, + ) -> DispatchResult { + >::mint_into( + &A::get(), + item, + who, + config, + deposit_collection_owner, + ) } fn burn(item: &Self::ItemId, maybe_check_owner: Option<&AccountId>) -> DispatchResult { >::burn(&A::get(), item, maybe_check_owner) diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs index d23e6d67573c7..0aec193f68fcb 100644 --- a/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -168,6 +168,7 @@ pub trait Mutate: Inspect { _item: &Self::ItemId, _who: &AccountId, _config: &ItemConfig, + _deposit_collection_owner: bool, ) -> DispatchResult { Err(TokenError::Unsupported.into()) } From 841574f94b4b6341f8aabccef455fbbe12eade4d Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Wed, 16 Nov 2022 17:50:15 +0100 Subject: [PATCH 022/101] Rename module to NFT fractionalisation --- Cargo.lock | 20 ++++++++ Cargo.toml | 1 + bin/node/runtime/Cargo.toml | 3 ++ bin/node/runtime/src/lib.rs | 16 +++++++ frame/erc1155/Cargo.toml | 44 ------------------ frame/nft-fractionalisation/Cargo.toml | 46 +++++++++++++++++++ .../README.md | 0 .../src/benchmarking.rs | 0 .../src/lib.rs | 0 .../src/mock.rs | 10 ++-- .../src/tests.rs | 8 ++-- 11 files changed, 95 insertions(+), 53 deletions(-) delete mode 100644 frame/erc1155/Cargo.toml create mode 100644 
frame/nft-fractionalisation/Cargo.toml rename frame/{erc1155 => nft-fractionalisation}/README.md (100%) rename frame/{erc1155 => nft-fractionalisation}/src/benchmarking.rs (100%) rename frame/{erc1155 => nft-fractionalisation}/src/lib.rs (100%) rename frame/{erc1155 => nft-fractionalisation}/src/mock.rs (94%) rename frame/{erc1155 => nft-fractionalisation}/src/tests.rs (56%) diff --git a/Cargo.lock b/Cargo.lock index d2f9b6766b8d3..f6c62cfd17be2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3401,6 +3401,7 @@ dependencies = [ "pallet-membership", "pallet-mmr", "pallet-multisig", + "pallet-nft-fractionalisation", "pallet-nfts", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", @@ -5687,6 +5688,25 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-nft-fractionalisation" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-assets", + "pallet-balances", + "pallet-uniques", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-nfts" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 0a747c8cec7c1..8efd9d2c7c488 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,6 +115,7 @@ members = [ "frame/preimage", "frame/proxy", "frame/nfts", + "frame/nft-fractionalisation", "frame/nomination-pools", "frame/nomination-pools/fuzzer", "frame/nomination-pools/benchmarking", diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 583f50a164e1e..f93cd8eb628bd 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -78,6 +78,7 @@ pallet-membership = { version = "4.0.0-dev", default-features = false, path = ". 
pallet-mmr = { version = "4.0.0-dev", default-features = false, path = "../../../frame/merkle-mountain-range" } pallet-multisig = { version = "4.0.0-dev", default-features = false, path = "../../../frame/multisig" } pallet-nfts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/nfts" } +pallet-nft-fractionalisation = { version = "4.0.0-dev", default-features = false, path = "../../../frame/nft-fractionalisation" } pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../../../frame/nomination-pools"} pallet-nomination-pools-benchmarking = { version = "1.0.0", default-features = false, optional = true, path = "../../../frame/nomination-pools/benchmarking" } pallet-nomination-pools-runtime-api = { version = "1.0.0-dev", default-features = false, path = "../../../frame/nomination-pools/runtime-api" } @@ -198,6 +199,7 @@ std = [ "pallet-recovery/std", "pallet-uniques/std", "pallet-nfts/std", + "pallet-nft-fractionalisation/std", "pallet-vesting/std", "log/std", "frame-try-runtime?/std", @@ -313,6 +315,7 @@ try-runtime = [ "pallet-transaction-storage/try-runtime", "pallet-uniques/try-runtime", "pallet-nfts/try-runtime", + "pallet-nft-fractionalisation/try-runtime", "pallet-vesting/try-runtime", "pallet-whitelist/try-runtime", ] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index be083efcc0706..2f68741e71528 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -89,6 +89,8 @@ pub use pallet_sudo::Call as SudoCall; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; +pub use pallet_nft_fractionalisation; + /// Implementations of some helper traits passed into runtime modules as associated types. pub mod impls; #[cfg(not(feature = "runtime-benchmarks"))] @@ -1545,6 +1547,19 @@ impl pallet_nfts::Config for Runtime { type Locker = (); } +parameter_types! 
{ + pub const NftFractionsPalletId: PalletId = PalletId(*b"fraction"); +} + +impl pallet_nft_fractionalisation::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PalletId = NftFractionsPalletId; + type Currency = Balances; + type CollectionId = Uniques; + type ItemId = Uniques; + type AssetId = Assets; +} + impl pallet_transaction_storage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; @@ -1702,6 +1717,7 @@ construct_runtime!( Gilt: pallet_gilt, Uniques: pallet_uniques, Nfts: pallet_nfts, + NftFractions: pallet_nft_fractionalisation, TransactionStorage: pallet_transaction_storage, VoterList: pallet_bags_list::, StateTrieMigration: pallet_state_trie_migration, diff --git a/frame/erc1155/Cargo.toml b/frame/erc1155/Cargo.toml deleted file mode 100644 index e527471c2b0f8..0000000000000 --- a/frame/erc1155/Cargo.toml +++ /dev/null @@ -1,44 +0,0 @@ -[package] -name = "pallet-erc1155" -version = "4.0.0-dev" -description = "FRAME pallet for semi-fungible tokens." 
-authors = ["Parity Technologies "] -homepage = "https://substrate.io" -edition = "2021" -license = "Unlicense" -publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ - "derive", -] } -log = "0.4" -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } -frame-support = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } -frame-system = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } -pallet-assets = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } -pallet-balances = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } -pallet-uniques = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } -sp-runtime = { version = "6.0.0", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } - -[dev-dependencies] -sp-core = { version = "6.0.0", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } -sp-io = { version = "6.0.0", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.30" } - - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-benchmarking?/std", - 
"frame-support/std", - "frame-system/std", - "scale-info/std", -] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/nft-fractionalisation/Cargo.toml b/frame/nft-fractionalisation/Cargo.toml new file mode 100644 index 0000000000000..c16d024b5c53f --- /dev/null +++ b/frame/nft-fractionalisation/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "pallet-nft-fractionalisation" +version = "4.0.0-dev" +description = "FRAME pallet for semi-fungible tokens." +authors = ["Parity Technologies "] +homepage = "https://substrate.io" +edition = "2021" +repository = "https://github.com/paritytech/substrate/" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +log = { version = "0.4.17", default-features = false } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +pallet-assets = { version = "4.0.0-dev", default-features = false, path = "../assets" } +pallet-uniques = { version = "4.0.0-dev", default-features = false, path = "../uniques" } +sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } + +[dev-dependencies] +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-std = { version = "5.0.0", path = "../../primitives/std" } + +[features] +default = ["std"] +std = [ + "codec/std", + 
"frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-assets/std", + "pallet-uniques/std", + "scale-info/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/erc1155/README.md b/frame/nft-fractionalisation/README.md similarity index 100% rename from frame/erc1155/README.md rename to frame/nft-fractionalisation/README.md diff --git a/frame/erc1155/src/benchmarking.rs b/frame/nft-fractionalisation/src/benchmarking.rs similarity index 100% rename from frame/erc1155/src/benchmarking.rs rename to frame/nft-fractionalisation/src/benchmarking.rs diff --git a/frame/erc1155/src/lib.rs b/frame/nft-fractionalisation/src/lib.rs similarity index 100% rename from frame/erc1155/src/lib.rs rename to frame/nft-fractionalisation/src/lib.rs diff --git a/frame/erc1155/src/mock.rs b/frame/nft-fractionalisation/src/mock.rs similarity index 94% rename from frame/erc1155/src/mock.rs rename to frame/nft-fractionalisation/src/mock.rs index d531d2401b0ac..199113b3aee8e 100644 --- a/frame/erc1155/src/mock.rs +++ b/frame/nft-fractionalisation/src/mock.rs @@ -1,4 +1,4 @@ -use crate as pallet_erc1155; +use crate as pallet_nft_fractionalisation; use frame_support::{ parameter_types, traits::{AsEnsureOriginWithArg, ConstU128, ConstU16, ConstU32, ConstU64}, @@ -30,7 +30,7 @@ frame_support::construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system, - Erc1155: pallet_erc1155, + NftFractions: pallet_nft_fractionalisation, Assets: pallet_assets, Uniques: pallet_uniques, Balances: pallet_balances, @@ -132,12 +132,12 @@ impl pallet_uniques::Config for Test { } parameter_types! 
{ - pub const ERC1155PalletId: PalletId = PalletId(*b"erc1155 "); + pub const NftFractionsPalletId: PalletId = PalletId(*b"fraction"); } -impl pallet_erc1155::Config for Test { +impl pallet_nft_fractionalisation::Config for Test { type RuntimeEvent = RuntimeEvent; - type PalletId = ERC1155PalletId; + type PalletId = NftFractionsPalletId; type Currency = Balances; type CollectionId = Uniques; type ItemId = Uniques; diff --git a/frame/erc1155/src/tests.rs b/frame/nft-fractionalisation/src/tests.rs similarity index 56% rename from frame/erc1155/src/tests.rs rename to frame/nft-fractionalisation/src/tests.rs index 756531d9dc3d6..f0126c9f07ff3 100644 --- a/frame/erc1155/src/tests.rs +++ b/frame/nft-fractionalisation/src/tests.rs @@ -5,11 +5,11 @@ use frame_support::{assert_noop, assert_ok, traits::Currency}; fn address_is_set() { new_test_ext().execute_with(|| { // Dispatch a signed extrinsic. - assert_eq!(Erc1155::pallet_address(), None); - assert_ok!(Erc1155::set_pallet_address(RuntimeOrigin::signed(1))); - assert_eq!(Erc1155::pallet_address(), Some(1u64)); + assert_eq!(NftFractions::pallet_address(), None); + assert_ok!(NftFractions::set_pallet_address(RuntimeOrigin::signed(1))); + assert_eq!(NftFractions::pallet_address(), Some(1u64)); // assert_eq!( - // Erc1155::issuance(), + // NftFractions::issuance(), // Some(>::total_issuance()) // ) }); From eab8e04ac8a762af978482783f4e8eb6df79cbf9 Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Thu, 17 Nov 2022 12:20:50 +0100 Subject: [PATCH 023/101] Loose coupling for pallet-assets --- bin/node/runtime/src/lib.rs | 4 +- frame/nft-fractionalisation/src/lib.rs | 98 +++++++++++++++++--------- 2 files changed, 68 insertions(+), 34 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 2f68741e71528..855723b6f43c8 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1557,7 +1557,9 @@ impl pallet_nft_fractionalisation::Config for Runtime { type Currency = 
Balances; type CollectionId = Uniques; type ItemId = Uniques; - type AssetId = Assets; + type AssetBalance = ::Balance; + type Assets = Assets; + type AssetId = ::AssetId; } impl pallet_transaction_storage::Config for Runtime { diff --git a/frame/nft-fractionalisation/src/lib.rs b/frame/nft-fractionalisation/src/lib.rs index 3928b5cbe6649..5d41ea88319e3 100644 --- a/frame/nft-fractionalisation/src/lib.rs +++ b/frame/nft-fractionalisation/src/lib.rs @@ -23,13 +23,26 @@ pub mod pallet { use frame_support::{ dispatch::DispatchResult, - sp_runtime::traits::{AccountIdConversion, StaticLookup}, - traits::Currency, + sp_runtime::traits::{ + AccountIdConversion, AtLeast32BitUnsigned, IntegerSquareRoot, StaticLookup, Zero, + }, + traits::{ + fungibles::{ + metadata::Mutate as MutateMetadata, Create, Inspect, InspectEnumerable, Mutate, + Transfer, + }, + Currency, + }, PalletId, }; pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; + pub type AssetIdOf = + <::Assets as Inspect<::AccountId>>::AssetId; + pub type AssetBalanceOf = + <::Assets as Inspect<::AccountId>>::Balance; + pub type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; #[pallet::pallet] @@ -37,8 +50,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: - frame_system::Config + pallet_uniques::Config + pallet_assets::Config + pub trait Config: frame_system::Config + pallet_uniques::Config //+ pallet_assets::Config { type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -48,7 +60,35 @@ pub mod pallet { type ItemId; - type AssetId; + type AssetBalance: AtLeast32BitUnsigned + + codec::FullCodec + + Copy + + MaybeSerializeDeserialize + + sp_std::fmt::Debug + + Default + + From + + IntegerSquareRoot + + Zero + + TypeInfo + + MaxEncodedLen; + + type AssetId: Member + + Parameter + + Default + + Copy + + codec::HasCompact + + From + + MaybeSerializeDeserialize + + MaxEncodedLen + + PartialOrd + + TypeInfo; + + type Assets: Inspect + + Create + + InspectEnumerable + + 
Mutate + + MutateMetadata + + Transfer; #[pallet::constant] type PalletId: Get; @@ -57,14 +97,9 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn assets_minted)] // TODO: query amount minted from pallet assets instead of storing it locally. - // Add a public getter function to pallet assets. - pub type AssetsMinted = StorageMap< - _, - Twox64Concat, - ::AssetId, - BalanceOf, - OptionQuery, - >; + // Add a public getter function to pallet assets. + pub type AssetsMinted = + StorageMap<_, Twox64Concat, AssetIdOf, BalanceOf, OptionQuery>; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] @@ -73,8 +108,8 @@ pub mod pallet { ::CollectionId, ::ItemId, ), - AssetCreated(::AssetId), - AssetMinted(::AssetId, ::Balance), + AssetCreated(AssetIdOf), + AssetMinted(AssetIdOf, AssetBalanceOf), } #[pallet::error] @@ -89,10 +124,10 @@ pub mod pallet { origin: OriginFor, collection_id: ::CollectionId, item_id: ::ItemId, - asset_id: ::AssetId, - beneficiary: AccountIdLookupOf, - min_balance: ::Balance, - amount: ::Balance, + asset_id: AssetIdOf, + beneficiary: T::AccountId, + min_balance: AssetBalanceOf, + amount: AssetBalanceOf, ) -> DispatchResult { let _who = ensure_signed(origin.clone())?; let admin_account_id = Self::pallet_account_id(); @@ -100,15 +135,15 @@ pub mod pallet { match Self::do_lock_nft(origin.clone(), collection_id, item_id) { Err(e) => return Err(e), - Ok(()) => match Self::do_create_asset(origin.clone(), asset_id, admin, min_balance) + //Ok(()) => match Self::do_create_asset(origin.clone(), asset_id, admin, min_balance) + Ok(()) => match Self::do_create_asset(asset_id, admin_account_id, min_balance) { Err(e) => return Err(e), Ok(()) => match Self::do_mint_asset( // Minting the asset is only possible from the pallet's origin. // TODO: should the minting be possible from the owner's account? 
- frame_system::RawOrigin::Signed(admin_account_id).into(), asset_id, - beneficiary, + &beneficiary, amount, ) { Err(e) => return Err(e), @@ -124,7 +159,6 @@ pub mod pallet { Ok(()) } - // TODO: return and burn 100% of the asset, unlock the NFT. // pub fn burn_asset_unlock_nft() -> DispatchResult {} } @@ -144,21 +178,19 @@ pub mod pallet { } fn do_create_asset( - who: OriginFor, - asset_id: ::AssetId, - admin: AccountIdLookupOf, - min_balance: ::Balance, + asset_id: AssetIdOf, + admin: T::AccountId, + min_balance: AssetBalanceOf, ) -> DispatchResult { - >::create(who, asset_id, admin, min_balance) + T::Assets::create(asset_id, admin, true, min_balance) } fn do_mint_asset( - who: OriginFor, - asset_id: ::AssetId, - beneficiary: AccountIdLookupOf, - amount: ::Balance, + asset_id: AssetIdOf, + beneficiary: &T::AccountId, + amount: AssetBalanceOf, ) -> DispatchResult { - >::mint(who, asset_id, beneficiary, amount) + T::Assets::mint_into(asset_id, beneficiary, amount) } } } From a37f8d11933846f70027fb38e04bf038a2bb152c Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Thu, 17 Nov 2022 12:21:55 +0100 Subject: [PATCH 024/101] cargo fmt --- bin/node-template/node/src/command.rs | 12 +- bin/node-template/node/src/service.rs | 7 +- bin/node/bench/src/core.rs | 8 +- bin/node/bench/src/main.rs | 2 +- bin/node/bench/src/simple_trie.rs | 2 +- bin/node/bench/src/tempdb.rs | 5 +- bin/node/cli/src/command.rs | 17 +- bin/node/cli/src/service.rs | 2 +- bin/node/cli/tests/common.rs | 4 +- bin/node/cli/tests/telemetry.rs | 2 +- bin/node/inspect/src/lib.rs | 6 +- bin/node/runtime/src/impls.rs | 6 +- bin/node/runtime/src/lib.rs | 28 +- bin/node/testing/src/bench.rs | 18 +- client/allocator/src/freeing_bump.rs | 10 +- client/api/src/backend.rs | 2 +- client/api/src/execution_extensions.rs | 11 +- client/api/src/in_mem.rs | 20 +- client/api/src/leaves.rs | 6 +- client/api/src/lib.rs | 6 +- client/api/src/notifications/registry.rs | 9 +- client/authority-discovery/src/worker.rs | 10 +- 
.../src/worker/addr_cache.rs | 4 +- .../basic-authorship/src/basic_authorship.rs | 26 +- client/beefy/rpc/src/lib.rs | 2 +- client/beefy/src/communication/gossip.rs | 8 +- .../incoming_requests_handler.rs | 4 +- .../outgoing_requests_engine.rs | 6 +- client/beefy/src/justification.rs | 8 +- client/beefy/src/round.rs | 6 +- client/beefy/src/worker.rs | 16 +- client/chain-spec/derive/src/impls.rs | 4 +- client/cli/src/arg_enums.rs | 35 ++- client/cli/src/commands/purge_chain_cmd.rs | 2 +- client/cli/src/commands/run_cmd.rs | 10 +- client/cli/src/commands/vanity.rs | 6 +- client/cli/src/commands/verify.rs | 2 +- client/cli/src/config.rs | 2 +- client/cli/src/params/network_params.rs | 9 +- client/cli/src/params/node_key_params.rs | 8 +- client/cli/src/params/shared_params.rs | 5 +- client/consensus/aura/src/import_queue.rs | 5 +- client/consensus/aura/src/lib.rs | 11 +- client/consensus/babe/src/authorship.rs | 18 +- client/consensus/babe/src/aux_schema.rs | 24 +- client/consensus/babe/src/lib.rs | 70 +++-- client/consensus/babe/src/tests.rs | 2 +- client/consensus/babe/src/verification.rs | 8 +- client/consensus/common/src/block_import.rs | 11 +- client/consensus/common/src/import_queue.rs | 11 +- .../common/src/import_queue/basic_queue.rs | 17 +- .../common/src/import_queue/buffered_link.rs | 15 +- client/consensus/epochs/src/lib.rs | 121 +++++--- .../manual-seal/src/consensus/babe.rs | 7 +- .../consensus/manual-seal/src/seal_block.rs | 9 +- client/consensus/pow/src/lib.rs | 41 +-- client/consensus/pow/src/worker.rs | 8 +- client/consensus/slots/src/aux_schema.rs | 6 +- client/consensus/slots/src/lib.rs | 30 +- client/consensus/slots/src/slots.rs | 4 +- client/db/src/bench.rs | 6 +- client/db/src/lib.rs | 135 +++++---- client/db/src/parity_db.rs | 18 +- client/db/src/upgrade.rs | 5 +- client/db/src/utils.rs | 33 ++- client/executor/benches/bench.rs | 2 +- .../runtime_blob/data_segments_snapshot.rs | 4 +- .../common/src/runtime_blob/runtime_blob.rs | 9 +- 
client/executor/common/src/sandbox.rs | 20 +- .../common/src/sandbox/wasmer_backend.rs | 19 +- client/executor/runtime-test/src/lib.rs | 4 +- client/executor/src/native_executor.rs | 2 +- client/executor/src/wasm_runtime.rs | 5 +- client/executor/wasmi/src/lib.rs | 21 +- client/executor/wasmtime/src/host.rs | 11 +- client/executor/wasmtime/src/imports.rs | 9 +- .../executor/wasmtime/src/instance_wrapper.rs | 12 +- client/executor/wasmtime/src/runtime.rs | 26 +- client/executor/wasmtime/src/util.rs | 4 +- client/finality-grandpa/src/authorities.rs | 41 +-- client/finality-grandpa/src/aux_schema.rs | 25 +- .../src/communication/gossip.rs | 168 ++++++----- .../finality-grandpa/src/communication/mod.rs | 51 ++-- .../src/communication/periodic.rs | 4 +- .../src/communication/tests.rs | 12 +- client/finality-grandpa/src/environment.rs | 62 ++-- client/finality-grandpa/src/finality_proof.rs | 12 +- client/finality-grandpa/src/import.rs | 36 +-- client/finality-grandpa/src/justification.rs | 19 +- client/finality-grandpa/src/lib.rs | 8 +- client/finality-grandpa/src/observer.rs | 10 +- client/finality-grandpa/src/tests.rs | 8 +- client/finality-grandpa/src/until_imported.rs | 22 +- client/finality-grandpa/src/voting_rule.rs | 14 +- client/finality-grandpa/src/warp_proof.rs | 12 +- client/informant/src/display.rs | 20 +- client/keystore/src/lib.rs | 5 +- client/keystore/src/local.rs | 23 +- client/network-gossip/src/bridge.rs | 14 +- client/network-gossip/src/state_machine.rs | 19 +- client/network/bitswap/src/lib.rs | 19 +- client/network/common/src/config.rs | 5 +- .../network/common/src/service/signature.rs | 4 +- client/network/common/src/utils.rs | 2 +- .../src/light_client_requests/handler.rs | 24 +- client/network/src/behaviour.rs | 75 +++-- client/network/src/discovery.rs | 72 ++--- client/network/src/network_state.rs | 10 +- client/network/src/peer_info.rs | 36 ++- client/network/src/protocol.rs | 136 +++++---- client/network/src/protocol/message.rs | 7 +- 
.../src/protocol/notifications/behaviour.rs | 104 +++---- .../src/protocol/notifications/handler.rs | 73 ++--- .../src/protocol/notifications/tests.rs | 22 +- .../notifications/upgrade/notifications.rs | 57 ++-- client/network/src/request_responses.rs | 63 ++-- client/network/src/service.rs | 102 ++++--- client/network/src/service/out_events.rs | 10 +- client/network/src/service/tests/mod.rs | 5 +- client/network/src/service/tests/service.rs | 41 +-- .../network/sync/src/block_request_handler.rs | 19 +- client/network/sync/src/blocks.rs | 30 +- client/network/sync/src/extra_requests.rs | 22 +- client/network/sync/src/lib.rs | 272 +++++++++--------- client/network/sync/src/service/network.rs | 10 +- client/network/sync/src/state.rs | 8 +- client/network/sync/src/warp.rs | 9 +- client/network/test/src/lib.rs | 15 +- client/network/test/src/sync.rs | 50 ++-- client/network/transactions/src/lib.rs | 19 +- client/offchain/src/api.rs | 5 +- client/offchain/src/api/http.rs | 69 ++--- client/offchain/src/api/timestamp.rs | 5 +- client/peerset/src/lib.rs | 63 ++-- client/peerset/src/peersstate.rs | 34 +-- client/peerset/tests/fuzz.rs | 19 +- client/rpc-api/src/chain/error.rs | 5 +- client/rpc-api/src/dev/error.rs | 20 +- client/rpc-api/src/state/error.rs | 10 +- client/rpc-api/src/system/error.rs | 5 +- client/rpc-spec-v2/src/transaction/error.rs | 35 ++- client/rpc-spec-v2/src/transaction/event.rs | 45 +-- .../src/transaction/transaction.rs | 22 +- client/rpc/src/author/mod.rs | 4 +- client/rpc/src/dev/mod.rs | 4 +- client/rpc/src/state/mod.rs | 4 +- client/rpc/src/state/state_full.rs | 35 ++- client/rpc/src/system/tests.rs | 12 +- client/service/src/builder.rs | 18 +- client/service/src/chain_ops/export_blocks.rs | 9 +- client/service/src/chain_ops/import_blocks.rs | 25 +- client/service/src/client/block_rules.rs | 4 +- client/service/src/client/client.rs | 120 ++++---- client/service/src/client/wasm_override.rs | 4 +- client/service/src/lib.rs | 12 +- 
client/service/test/src/lib.rs | 2 +- client/state-db/src/lib.rs | 58 ++-- client/state-db/src/noncanonical.rs | 28 +- client/state-db/src/pruning.rs | 4 +- client/sysinfo/src/sysinfo.rs | 12 +- client/telemetry/src/endpoints.rs | 2 +- client/telemetry/src/lib.rs | 6 +- client/telemetry/src/node.rs | 14 +- client/telemetry/src/transport.rs | 2 +- client/tracing/proc-macro/src/lib.rs | 2 +- client/tracing/src/block/mod.rs | 10 +- client/tracing/src/lib.rs | 20 +- client/tracing/src/logging/event_format.rs | 15 +- .../src/logging/layers/prefix_layer.rs | 4 +- client/tracing/src/logging/stderr_writer.rs | 4 +- .../transaction-pool/src/enactment_state.rs | 6 +- .../transaction-pool/src/graph/base_pool.rs | 24 +- client/transaction-pool/src/graph/future.rs | 4 +- client/transaction-pool/src/graph/pool.rs | 17 +- client/transaction-pool/src/graph/ready.rs | 20 +- client/transaction-pool/src/graph/rotator.rs | 2 +- .../src/graph/validated_pool.rs | 22 +- client/transaction-pool/src/lib.rs | 29 +- client/transaction-pool/src/revalidation.rs | 2 +- client/utils/src/mpsc.rs | 2 +- client/utils/src/status_sinks.rs | 2 +- frame/alliance/src/lib.rs | 8 +- frame/assets/src/functions.rs | 36 +-- frame/assets/src/impl_stored_map.rs | 2 +- frame/assets/src/lib.rs | 2 +- frame/assets/src/types.rs | 2 +- frame/aura/src/lib.rs | 4 +- frame/authorship/src/lib.rs | 29 +- frame/babe/src/equivocation.rs | 2 +- frame/babe/src/lib.rs | 8 +- frame/babe/src/tests.rs | 6 +- frame/bags-list/remote-tests/src/lib.rs | 2 +- frame/bags-list/src/list/mod.rs | 52 ++-- frame/bags-list/src/migrations.rs | 2 +- frame/balances/src/lib.rs | 97 ++++--- frame/beefy-mmr/primitives/src/lib.rs | 4 +- frame/beefy/src/lib.rs | 4 +- frame/benchmarking/src/analysis.rs | 14 +- frame/benchmarking/src/lib.rs | 2 +- frame/benchmarking/src/tests.rs | 2 +- frame/bounties/src/benchmarking.rs | 4 +- frame/bounties/src/lib.rs | 26 +- frame/bounties/src/migrations/v4.rs | 2 +- frame/child-bounties/src/benchmarking.rs | 4 +- 
frame/child-bounties/src/lib.rs | 24 +- frame/collective/src/lib.rs | 8 +- frame/collective/src/migrations/v4.rs | 6 +- frame/contracts/primitives/src/lib.rs | 20 +- frame/contracts/proc-macro/src/lib.rs | 10 +- frame/contracts/src/benchmarking/code.rs | 10 +- frame/contracts/src/benchmarking/mod.rs | 12 +- frame/contracts/src/exec.rs | 33 +-- frame/contracts/src/lib.rs | 7 +- frame/contracts/src/migration.rs | 2 +- frame/contracts/src/schedule.rs | 43 +-- frame/contracts/src/storage.rs | 19 +- frame/contracts/src/storage/meter.rs | 30 +- frame/contracts/src/tests.rs | 4 +- frame/contracts/src/wasm/mod.rs | 10 +- frame/contracts/src/wasm/prepare.rs | 73 ++--- frame/contracts/src/wasm/runtime.rs | 46 +-- frame/conviction-voting/src/lib.rs | 7 +- frame/conviction-voting/src/tests.rs | 10 +- frame/conviction-voting/src/vote.rs | 20 +- frame/democracy/src/lib.rs | 10 +- frame/democracy/src/migrations.rs | 12 +- frame/democracy/src/vote.rs | 10 +- frame/democracy/src/vote_threshold.rs | 20 +- .../election-provider-multi-phase/src/lib.rs | 8 +- .../election-provider-multi-phase/src/mock.rs | 16 +- .../src/signed.rs | 6 +- .../src/unsigned.rs | 31 +- .../solution-type/src/lib.rs | 8 +- .../solution-type/src/single_page.rs | 2 +- frame/election-provider-support/src/lib.rs | 5 +- frame/election-provider-support/src/mock.rs | 2 +- .../election-provider-support/src/onchain.rs | 2 +- frame/elections-phragmen/src/lib.rs | 8 +- frame/elections-phragmen/src/migrations/v4.rs | 2 +- frame/examples/basic/src/lib.rs | 2 +- frame/examples/offchain-worker/src/lib.rs | 33 ++- frame/executive/src/lib.rs | 29 +- frame/fast-unstake/src/lib.rs | 19 +- frame/fast-unstake/src/mock.rs | 4 +- frame/gilt/src/lib.rs | 6 +- frame/grandpa/src/equivocation.rs | 2 +- frame/grandpa/src/lib.rs | 10 +- frame/grandpa/src/migrations/v4.rs | 2 +- frame/grandpa/src/tests.rs | 4 +- frame/identity/src/benchmarking.rs | 18 +- frame/identity/src/lib.rs | 16 +- frame/identity/src/types.rs | 5 +- 
frame/im-online/src/lib.rs | 29 +- frame/im-online/src/tests.rs | 15 +- frame/lottery/src/lib.rs | 10 +- frame/membership/src/lib.rs | 2 +- frame/membership/src/migrations/v4.rs | 6 +- frame/merkle-mountain-range/rpc/src/lib.rs | 4 +- frame/merkle-mountain-range/src/lib.rs | 12 +- frame/merkle-mountain-range/src/mmr/mmr.rs | 4 +- .../merkle-mountain-range/src/mmr/storage.rs | 6 +- frame/merkle-mountain-range/src/mmr/utils.rs | 4 +- frame/multisig/src/migrations.rs | 2 +- frame/nft-fractionalisation/src/lib.rs | 3 +- frame/nfts/src/features/attributes.rs | 4 +- frame/nfts/src/features/metadata.rs | 20 +- frame/nfts/src/features/settings.rs | 2 +- frame/nfts/src/features/transfer.rs | 4 +- frame/nfts/src/impl_nonfungibles.rs | 10 +- frame/nfts/src/lib.rs | 9 +- frame/nfts/src/tests.rs | 20 +- frame/node-authorization/src/lib.rs | 4 +- frame/nomination-pools/fuzzer/src/call.rs | 8 +- frame/nomination-pools/src/lib.rs | 45 +-- frame/nomination-pools/src/migration.rs | 10 +- frame/preimage/src/lib.rs | 17 +- frame/preimage/src/migration.rs | 18 +- frame/proxy/src/lib.rs | 22 +- frame/ranked-collective/src/lib.rs | 7 +- frame/ranked-collective/src/tests.rs | 10 +- frame/referenda/src/branch.rs | 22 +- frame/referenda/src/lib.rs | 18 +- frame/referenda/src/types.rs | 25 +- frame/scheduler/src/benchmarking.rs | 6 +- frame/scheduler/src/lib.rs | 31 +- frame/scheduler/src/migration.rs | 6 +- frame/scheduler/src/tests.rs | 36 +-- frame/scored-pool/src/lib.rs | 10 +- frame/session/src/historical/mod.rs | 6 +- frame/session/src/lib.rs | 8 +- frame/session/src/migrations/v1.rs | 6 +- frame/session/src/mock.rs | 4 +- frame/society/src/lib.rs | 18 +- frame/staking/reward-curve/src/lib.rs | 30 +- frame/staking/reward-curve/src/log.rs | 4 +- frame/staking/reward-fn/src/lib.rs | 16 +- frame/staking/src/benchmarking.rs | 2 +- frame/staking/src/inflation.rs | 4 +- frame/staking/src/lib.rs | 8 +- frame/staking/src/migrations.rs | 6 +- frame/staking/src/mock.rs | 4 +- 
frame/staking/src/pallet/impls.rs | 45 +-- frame/staking/src/pallet/mod.rs | 55 ++-- frame/staking/src/slashing.rs | 10 +- frame/staking/src/tests.rs | 12 +- frame/state-trie-migration/src/lib.rs | 14 +- .../support/procedural/src/clone_no_bound.rs | 2 +- .../src/construct_runtime/expand/event.rs | 2 +- .../src/construct_runtime/expand/origin.rs | 2 +- .../procedural/src/construct_runtime/mod.rs | 12 +- .../procedural/src/construct_runtime/parse.rs | 77 ++--- frame/support/procedural/src/crate_version.rs | 2 +- .../support/procedural/src/debug_no_bound.rs | 2 +- .../procedural/src/default_no_bound.rs | 7 +- .../procedural/src/dummy_part_checker.rs | 2 +- frame/support/procedural/src/key_prefix.rs | 2 +- .../procedural/src/match_and_insert.rs | 10 +- .../procedural/src/pallet/expand/error.rs | 2 +- .../procedural/src/pallet/expand/event.rs | 2 +- .../src/pallet/expand/genesis_build.rs | 2 +- .../src/pallet/expand/genesis_config.rs | 8 +- .../procedural/src/pallet/expand/storage.rs | 39 +-- frame/support/procedural/src/pallet/mod.rs | 2 +- .../procedural/src/pallet/parse/call.rs | 29 +- .../procedural/src/pallet/parse/config.rs | 30 +- .../procedural/src/pallet/parse/error.rs | 8 +- .../procedural/src/pallet/parse/event.rs | 6 +- .../src/pallet/parse/extra_constants.rs | 16 +- .../src/pallet/parse/genesis_build.rs | 2 +- .../src/pallet/parse/genesis_config.rs | 6 +- .../procedural/src/pallet/parse/helper.rs | 18 +- .../procedural/src/pallet/parse/hooks.rs | 4 +- .../procedural/src/pallet/parse/inherent.rs | 8 +- .../procedural/src/pallet/parse/mod.rs | 51 ++-- .../procedural/src/pallet/parse/origin.rs | 6 +- .../src/pallet/parse/pallet_struct.rs | 8 +- .../procedural/src/pallet/parse/storage.rs | 84 +++--- .../procedural/src/pallet/parse/type_value.rs | 12 +- .../src/pallet/parse/validate_unsigned.rs | 8 +- frame/support/procedural/src/pallet_error.rs | 22 +- .../procedural/src/partial_eq_no_bound.rs | 2 +- .../genesis_config/genesis_config_def.rs | 2 +- 
frame/support/procedural/src/storage/mod.rs | 32 ++- frame/support/procedural/src/storage/parse.rs | 28 +- .../src/storage/print_pallet_upgrade.rs | 4 +- frame/support/procedural/src/storage_alias.rs | 24 +- frame/support/procedural/tools/src/syn_ext.rs | 2 +- frame/support/src/dispatch.rs | 24 +- frame/support/src/hash.rs | 4 +- frame/support/src/lib.rs | 2 +- frame/support/src/storage/child.rs | 25 +- .../src/storage/generator/double_map.rs | 6 +- frame/support/src/storage/generator/map.rs | 6 +- frame/support/src/storage/generator/nmap.rs | 4 +- frame/support/src/storage/migration.rs | 6 +- frame/support/src/storage/mod.rs | 18 +- .../support/src/storage/storage_noop_guard.rs | 2 +- frame/support/src/storage/transactional.rs | 4 +- .../src/traits/tokens/fungible/balanced.rs | 4 +- .../src/traits/tokens/fungibles/balanced.rs | 4 +- frame/support/src/traits/tokens/imbalance.rs | 2 +- .../tokens/imbalance/signed_imbalance.rs | 10 +- frame/support/src/traits/try_runtime.rs | 5 +- .../test/tests/construct_runtime_ui.rs | 2 +- frame/support/test/tests/decl_module_ui.rs | 2 +- frame/support/test/tests/decl_storage_ui.rs | 2 +- .../support/test/tests/derive_no_bound_ui.rs | 2 +- frame/support/test/tests/pallet.rs | 8 +- .../test/tests/pallet_compatibility.rs | 5 +- .../tests/pallet_compatibility_instance.rs | 5 +- frame/support/test/tests/pallet_ui.rs | 2 +- frame/support/test/tests/storage_alias_ui.rs | 2 +- .../src/extensions/check_non_zero_sender.rs | 2 +- frame/system/src/extensions/check_nonce.rs | 4 +- frame/system/src/extensions/check_weight.rs | 37 +-- frame/system/src/lib.rs | 8 +- frame/system/src/offchain.rs | 2 +- frame/tips/src/benchmarking.rs | 6 +- frame/tips/src/lib.rs | 8 +- frame/tips/src/migrations/v4.rs | 6 +- .../asset-tx-payment/src/payment.rs | 2 +- frame/transaction-payment/src/lib.rs | 15 +- frame/transaction-payment/src/payment.rs | 2 +- frame/transaction-storage/src/lib.rs | 4 +- frame/uniques/src/impl_nonfungibles.rs | 2 +- 
frame/uniques/src/lib.rs | 6 +- frame/utility/src/lib.rs | 14 +- frame/vesting/src/lib.rs | 14 +- frame/vesting/src/tests.rs | 8 +- frame/vesting/src/vesting_info.rs | 4 +- .../api/proc-macro/src/decl_runtime_apis.rs | 12 +- .../api/proc-macro/src/impl_runtime_apis.rs | 6 +- .../proc-macro/src/mock_impl_runtime_apis.rs | 18 +- primitives/api/proc-macro/src/utils.rs | 17 +- primitives/api/test/tests/trybuild.rs | 2 +- primitives/arithmetic/fuzzer/src/biguint.rs | 2 +- .../arithmetic/fuzzer/src/fixed_point.rs | 4 +- .../src/multiply_by_rational_with_rounding.rs | 2 +- primitives/arithmetic/fuzzer/src/normalize.rs | 2 +- primitives/arithmetic/src/biguint.rs | 16 +- primitives/arithmetic/src/fixed_point.rs | 14 +- primitives/arithmetic/src/helpers_128bit.rs | 6 +- primitives/arithmetic/src/lib.rs | 6 +- primitives/arithmetic/src/per_things.rs | 28 +- primitives/arithmetic/src/rational.rs | 4 +- primitives/authorship/src/lib.rs | 2 +- primitives/blockchain/src/backend.rs | 12 +- primitives/blockchain/src/header_metadata.rs | 8 +- primitives/consensus/babe/src/digests.rs | 5 +- primitives/consensus/babe/src/lib.rs | 12 +- primitives/consensus/vrf/src/schnorrkel.rs | 45 +-- .../core/hashing/proc-macro/src/impls.rs | 12 +- .../core/src/bounded/bounded_btree_map.rs | 2 +- .../core/src/bounded/bounded_btree_set.rs | 2 +- primitives/core/src/bounded/bounded_vec.rs | 18 +- primitives/core/src/crypto.rs | 22 +- primitives/core/src/ecdsa.rs | 4 +- primitives/core/src/ed25519.rs | 4 +- primitives/core/src/lib.rs | 9 +- primitives/core/src/offchain/storage.rs | 5 +- primitives/core/src/offchain/testing.rs | 15 +- primitives/core/src/sr25519.rs | 4 +- primitives/database/src/kvdb.rs | 2 +- primitives/debug-derive/src/impls.rs | 5 +- primitives/finality-grandpa/src/lib.rs | 8 +- primitives/inherents/src/lib.rs | 8 +- primitives/io/src/batch_verifier.rs | 8 +- primitives/keystore/src/lib.rs | 8 +- primitives/keystore/src/testing.rs | 7 +- primitives/maybe-compressed-blob/src/lib.rs | 
2 +- .../npos-elections/fuzzer/src/common.rs | 10 +- .../fuzzer/src/phragmen_balancing.rs | 8 +- .../fuzzer/src/phragmms_balancing.rs | 8 +- primitives/npos-elections/src/balancing.rs | 10 +- primitives/npos-elections/src/lib.rs | 2 +- primitives/npos-elections/src/mock.rs | 10 +- primitives/npos-elections/src/node.rs | 4 +- primitives/npos-elections/src/phragmen.rs | 2 +- primitives/npos-elections/src/phragmms.rs | 2 +- primitives/npos-elections/src/pjr.rs | 6 +- primitives/npos-elections/src/reduce.rs | 24 +- .../runtime-interface/proc-macro/src/lib.rs | 2 +- .../proc-macro/src/pass_by/enum_.rs | 5 +- .../proc-macro/src/pass_by/inner.rs | 4 +- .../host_function_interface.rs | 5 +- .../src/runtime_interface/trait_decl_impl.rs | 2 +- .../runtime-interface/proc-macro/src/utils.rs | 14 +- primitives/runtime-interface/src/impls.rs | 2 +- primitives/runtime-interface/tests/ui.rs | 2 +- primitives/runtime/src/curve.rs | 6 +- primitives/runtime/src/generic/digest.rs | 30 +- primitives/runtime/src/generic/era.rs | 4 +- primitives/runtime/src/generic/header.rs | 10 +- .../src/generic/unchecked_extrinsic.rs | 6 +- primitives/runtime/src/lib.rs | 24 +- primitives/runtime/src/offchain/http.rs | 8 +- .../runtime/src/offchain/storage_lock.rs | 8 +- primitives/runtime/src/traits.rs | 4 +- .../runtime/src/transaction_validity.rs | 25 +- primitives/sandbox/src/embedded_executor.rs | 10 +- primitives/state-machine/src/basic.rs | 15 +- primitives/state-machine/src/ext.rs | 16 +- primitives/state-machine/src/lib.rs | 87 +++--- .../src/overlayed_changes/changeset.rs | 6 +- primitives/state-machine/src/trie_backend.rs | 4 +- .../state-machine/src/trie_backend_essence.rs | 32 ++- primitives/storage/src/lib.rs | 5 +- primitives/timestamp/src/lib.rs | 4 +- .../transaction-storage-proof/src/lib.rs | 9 +- primitives/trie/src/cache/mod.rs | 4 +- primitives/trie/src/cache/shared_cache.rs | 21 +- primitives/trie/src/node_codec.rs | 41 +-- primitives/trie/src/node_header.rs | 26 +- 
primitives/trie/src/trie_codec.rs | 14 +- primitives/trie/src/trie_stream.rs | 20 +- .../proc-macro/src/decl_runtime_version.rs | 25 +- primitives/version/src/lib.rs | 10 +- test-utils/client/src/lib.rs | 4 +- test-utils/derive/src/lib.rs | 2 +- test-utils/runtime/src/lib.rs | 31 +- test-utils/runtime/src/system.rs | 19 +- .../runtime/transaction-pool/src/lib.rs | 31 +- utils/build-script-utils/src/git.rs | 4 +- utils/fork-tree/src/lib.rs | 80 +++--- .../frame/benchmarking-cli/src/block/bench.rs | 2 +- .../benchmarking-cli/src/extrinsic/bench.rs | 4 +- .../benchmarking-cli/src/extrinsic/cmd.rs | 7 +- .../frame/benchmarking-cli/src/machine/mod.rs | 2 +- .../benchmarking-cli/src/overhead/template.rs | 2 +- .../benchmarking-cli/src/pallet/command.rs | 36 +-- .../benchmarking-cli/src/pallet/writer.rs | 10 +- .../benchmarking-cli/src/shared/stats.rs | 2 +- .../src/shared/weight_params.rs | 2 +- .../frame/benchmarking-cli/src/storage/cmd.rs | 2 +- .../benchmarking-cli/src/storage/write.rs | 6 +- .../frame-utilities-cli/src/pallet_id.rs | 2 +- utils/frame/generate-bags/src/lib.rs | 4 +- utils/frame/remote-externalities/src/lib.rs | 30 +- .../rpc/state-trie-migration-rpc/src/lib.rs | 9 +- utils/frame/try-runtime/cli/src/lib.rs | 27 +- utils/wasm-builder/src/builder.rs | 6 +- utils/wasm-builder/src/lib.rs | 5 +- utils/wasm-builder/src/prerequisites.rs | 7 +- utils/wasm-builder/src/wasm_project.rs | 32 ++- 504 files changed, 4237 insertions(+), 3621 deletions(-) diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 6d293b7b85fcc..c4d944d7f2250 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -39,8 +39,9 @@ impl SubstrateCli for Cli { Ok(match id { "dev" => Box::new(chain_spec::development_config()?), "" | "local" => Box::new(chain_spec::local_testnet_config()?), - path => - Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), + path => { + 
Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?) + }, }) } @@ -118,7 +119,7 @@ pub fn run() -> sc_cli::Result<()> { "Runtime benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." .into(), - ) + ); } cmd.run::(config) @@ -167,8 +168,9 @@ pub fn run() -> sc_cli::Result<()> { cmd.run(client, inherent_benchmark_data()?, Vec::new(), &ext_factory) }, - BenchmarkCmd::Machine(cmd) => - cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), + BenchmarkCmd::Machine(cmd) => { + cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()) + }, } }) }, diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index ee8464688c79c..adc453b615f3e 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -59,7 +59,7 @@ pub fn new_partial( ServiceError, > { if config.keystore_remote.is_some() { - return Err(ServiceError::Other("Remote Keystores are not supported.".into())) + return Err(ServiceError::Other("Remote Keystores are not supported.".into())); } let telemetry = config @@ -170,11 +170,12 @@ pub fn new_full(mut config: Configuration) -> Result if let Some(url) = &config.keystore_remote { match remote_keystore(url) { Ok(k) => keystore_container.set_remote_keystore(k), - Err(e) => + Err(e) => { return Err(ServiceError::Other(format!( "Error hooking up remote keystore for {}: {}", url, e - ))), + ))) + }, }; } let grandpa_protocol_name = sc_finality_grandpa::protocol_standard_name( diff --git a/bin/node/bench/src/core.rs b/bin/node/bench/src/core.rs index b6ad3ecd80068..72e9507e97be9 100644 --- a/bin/node/bench/src/core.rs +++ b/bin/node/bench/src/core.rs @@ -74,19 +74,19 @@ impl fmt::Display for NsFormatter { let v = self.0; if v < 100 { - return write!(f, "{} ns", v) + return write!(f, "{} ns", v); } if self.0 < 100_000 { - return write!(f, "{:.1} µs", v as f64 / 1000.0) + return write!(f, "{:.1} µs", v as f64 / 1000.0); } 
if self.0 < 1_000_000 { - return write!(f, "{:.4} ms", v as f64 / 1_000_000.0) + return write!(f, "{:.4} ms", v as f64 / 1_000_000.0); } if self.0 < 100_000_000 { - return write!(f, "{:.1} ms", v as f64 / 1_000_000.0) + return write!(f, "{:.1} ms", v as f64 / 1_000_000.0); } write!(f, "{:.4} s", v as f64 / 1_000_000_000.0) diff --git a/bin/node/bench/src/main.rs b/bin/node/bench/src/main.rs index 8a5d99640eb1b..a1c7e73d13e3f 100644 --- a/bin/node/bench/src/main.rs +++ b/bin/node/bench/src/main.rs @@ -164,7 +164,7 @@ fn main() { println!("{}: {}", benchmark.name(), benchmark.path().full()) } } - return + return; } let mut results = Vec::new(); diff --git a/bin/node/bench/src/simple_trie.rs b/bin/node/bench/src/simple_trie.rs index aa9c96a1cbd3f..04f307821f33e 100644 --- a/bin/node/bench/src/simple_trie.rs +++ b/bin/node/bench/src/simple_trie.rs @@ -45,7 +45,7 @@ impl<'a> HashDB for SimpleTrie<'a> { fn get(&self, key: &Hash, prefix: Prefix) -> Option { let key = sp_trie::prefixed_key::(key, prefix); if let Some(value) = self.overlay.get(&key) { - return value.clone() + return value.clone(); } self.db.get(0, &key).expect("Database backend error") } diff --git a/bin/node/bench/src/tempdb.rs b/bin/node/bench/src/tempdb.rs index eb3bb1d3fccd7..5fa052ab16563 100644 --- a/bin/node/bench/src/tempdb.rs +++ b/bin/node/bench/src/tempdb.rs @@ -46,8 +46,9 @@ impl KeyValueDB for ParityDbWrapper { fn write(&self, transaction: DBTransaction) -> io::Result<()> { self.0 .commit(transaction.ops.iter().map(|op| match op { - kvdb::DBOp::Insert { col, key, value } => - (*col as u8, &key[key.len() - 32..], Some(value.to_vec())), + kvdb::DBOp::Insert { col, key, value } => { + (*col as u8, &key[key.len() - 32..], Some(value.to_vec())) + }, kvdb::DBOp::Delete { col, key } => (*col as u8, &key[key.len() - 32..], None), kvdb::DBOp::DeletePrefix { col: _, prefix: _ } => unimplemented!(), })) diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 
108d7743843b6..ac3057df89e78 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -59,17 +59,19 @@ impl SubstrateCli for Cli { fn load_spec(&self, id: &str) -> std::result::Result, String> { let spec = match id { - "" => + "" => { return Err( "Please specify which chain you want to run, e.g. --dev or --chain=local" .into(), - ), + ) + }, "dev" => Box::new(chain_spec::development_config()), "local" => Box::new(chain_spec::local_testnet_config()), "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?), "staging" => Box::new(chain_spec::staging_testnet_config()), - path => - Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), + path => { + Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?) + }, }; Ok(spec) } @@ -109,7 +111,7 @@ pub fn run() -> Result<()> { "Runtime benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." .into(), - ) + ); } cmd.run::(config) @@ -166,8 +168,9 @@ pub fn run() -> Result<()> { &ext_factory, ) }, - BenchmarkCmd::Machine(cmd) => - cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), + BenchmarkCmd::Machine(cmd) => { + cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()) + }, } }) }, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 6c29f0c08ee13..5a95bd4228fbe 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -686,7 +686,7 @@ mod tests { sc_consensus_babe::authorship::claim_slot(slot.into(), &epoch, &keystore) .map(|(digest, _)| digest) { - break (babe_pre_digest, epoch_descriptor) + break (babe_pre_digest, epoch_descriptor); } slot += 1; diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index 358c09779d59a..7da4eea5191a3 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -47,7 +47,7 @@ pub fn wait_for(child: &mut Child, secs: u64) -> Result { let result = 
wait_timeout::ChildExt::wait_timeout(child, Duration::from_secs(secs - 5)) .map_err(|_| ())?; if let Some(exit_status) = result { - return Ok(exit_status) + return Ok(exit_status); } } eprintln!("Took too long to exit (> {} seconds). Killing...", secs); @@ -78,7 +78,7 @@ pub async fn wait_n_finalized_blocks_from(n: usize, url: &str) { if let Ok(block) = ChainApi::<(), Hash, Header, ()>::finalized_head(&rpc).await { built_blocks.insert(block); if built_blocks.len() > n { - break + break; } }; interval.tick().await; diff --git a/bin/node/cli/tests/telemetry.rs b/bin/node/cli/tests/telemetry.rs index bef4e4ea03048..fb9e0a9d38858 100644 --- a/bin/node/cli/tests/telemetry.rs +++ b/bin/node/cli/tests/telemetry.rs @@ -54,7 +54,7 @@ async fn telemetry_works() { let object = json.as_object().unwrap().get("payload").unwrap().as_object().unwrap(); if matches!(object.get("best"), Some(serde_json::Value::String(_))) { - break + break; } }, diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index 528dce14f46a5..c86dbe2d15e3b 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -210,12 +210,12 @@ impl FromStr for BlockAddress { fn from_str(s: &str) -> Result { // try to parse hash first if let Ok(hash) = s.parse() { - return Ok(Self::Hash(hash)) + return Ok(Self::Hash(hash)); } // then number if let Ok(number) = s.parse() { - return Ok(Self::Number(number)) + return Ok(Self::Number(number)); } // then assume it's bytes (hex-encoded) @@ -243,7 +243,7 @@ impl FromStr for ExtrinsicAddres fn from_str(s: &str) -> Result { // first try raw bytes if let Ok(bytes) = sp_core::bytes::from_hex(s).map(Self::Bytes) { - return Ok(bytes) + return Ok(bytes); } // split by a bunch of different characters diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 0a5c797ba729f..9dc9be6b76e18 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -260,7 +260,7 @@ mod multiplier_tests { let next = 
runtime_multiplier_update(fm); fm = next; if fm == min_multiplier() { - break + break; } iterations += 1; } @@ -288,8 +288,8 @@ mod multiplier_tests { // `cargo test congested_chain_simulation -- --nocapture` to get some insight. // almost full. The entire quota of normal transactions is taken. - let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() - - Weight::from_ref_time(100); + let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() + - Weight::from_ref_time(100); // Default substrate weight. let tx_weight = frame_support::weights::constants::ExtrinsicBaseWeight::get(); diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 855723b6f43c8..b346acd1dc195 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -296,21 +296,21 @@ impl InstanceFilter for ProxyType { ProxyType::Any => true, ProxyType::NonTransfer => !matches!( c, - RuntimeCall::Balances(..) | - RuntimeCall::Assets(..) | - RuntimeCall::Uniques(..) | - RuntimeCall::Nfts(..) | - RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { .. }) | - RuntimeCall::Indices(pallet_indices::Call::transfer { .. }) + RuntimeCall::Balances(..) + | RuntimeCall::Assets(..) + | RuntimeCall::Uniques(..) + | RuntimeCall::Nfts(..) + | RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { .. }) + | RuntimeCall::Indices(pallet_indices::Call::transfer { .. }) ), ProxyType::Governance => matches!( c, - RuntimeCall::Democracy(..) | - RuntimeCall::Council(..) | - RuntimeCall::Society(..) | - RuntimeCall::TechnicalCommittee(..) | - RuntimeCall::Elections(..) | - RuntimeCall::Treasury(..) + RuntimeCall::Democracy(..) + | RuntimeCall::Council(..) + | RuntimeCall::Society(..) + | RuntimeCall::TechnicalCommittee(..) + | RuntimeCall::Elections(..) + | RuntimeCall::Treasury(..) 
), ProxyType::Staking => matches!(c, RuntimeCall::Staking(..)), } @@ -670,8 +670,8 @@ impl Get> for OffchainRandomBalancing { max => { let seed = sp_io::offchain::random_seed(); let random = ::decode(&mut TrailingZeroInput::new(&seed)) - .expect("input is padded with zeroes; qed") % - max.saturating_add(1); + .expect("input is padded with zeroes; qed") + % max.saturating_add(1); random as usize }, }; diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 59f1fa94c9b20..4ce6fa518222b 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -95,7 +95,7 @@ pub fn drop_system_cache() { target: "bench-logistics", "Clearing system cache on windows is not supported. Benchmark might totally be wrong.", ); - return + return; } std::process::Command::new("sync") @@ -291,7 +291,7 @@ impl<'a> Iterator for BlockContentIterator<'a> { fn next(&mut self) -> Option { if self.content.size.map(|size| size <= self.iteration).unwrap_or(false) { - return None + return None; } let sender = self.keyring.at(self.iteration); @@ -307,22 +307,24 @@ impl<'a> Iterator for BlockContentIterator<'a> { signed_extra(0, kitchensink_runtime::ExistentialDeposit::get() + 1), )), function: match self.content.block_type { - BlockType::RandomTransfersKeepAlive => + BlockType::RandomTransfersKeepAlive => { RuntimeCall::Balances(BalancesCall::transfer_keep_alive { dest: sp_runtime::MultiAddress::Id(receiver), value: kitchensink_runtime::ExistentialDeposit::get() + 1, - }), + }) + }, BlockType::RandomTransfersReaping => { RuntimeCall::Balances(BalancesCall::transfer { dest: sp_runtime::MultiAddress::Id(receiver), // Transfer so that ending balance would be 1 less than existential // deposit so that we kill the sender account. 
- value: 100 * DOLLARS - - (kitchensink_runtime::ExistentialDeposit::get() - 1), + value: 100 * DOLLARS + - (kitchensink_runtime::ExistentialDeposit::get() - 1), }) }, - BlockType::Noop => - RuntimeCall::System(SystemCall::remark { remark: Vec::new() }), + BlockType::Noop => { + RuntimeCall::System(SystemCall::remark { remark: Vec::new() }) + }, }, }, self.runtime_version.spec_version, diff --git a/client/allocator/src/freeing_bump.rs b/client/allocator/src/freeing_bump.rs index e81d1b79e74ed..c1bc1bc9eb6f7 100644 --- a/client/allocator/src/freeing_bump.rs +++ b/client/allocator/src/freeing_bump.rs @@ -141,7 +141,7 @@ impl Order { fn from_size(size: u32) -> Result { let clamped_size = if size > MAX_POSSIBLE_ALLOCATION { log::warn!(target: LOG_TARGET, "going to fail due to allocating {:?}", size); - return Err(Error::RequestedAllocationTooLarge) + return Err(Error::RequestedAllocationTooLarge); } else if size < MIN_POSSIBLE_ALLOCATION { MIN_POSSIBLE_ALLOCATION } else { @@ -401,7 +401,7 @@ impl FreeingBumpHeapAllocator { size: WordSize, ) -> Result, Error> { if self.poisoned { - return Err(error("the allocator has been poisoned")) + return Err(error("the allocator has been poisoned")); } let bomb = PoisonBomb { poisoned: &mut self.poisoned }; @@ -463,7 +463,7 @@ impl FreeingBumpHeapAllocator { ptr: Pointer, ) -> Result<(), Error> { if self.poisoned { - return Err(error("the allocator has been poisoned")) + return Err(error("the allocator has been poisoned")); } let bomb = PoisonBomb { poisoned: &mut self.poisoned }; @@ -511,7 +511,7 @@ impl FreeingBumpHeapAllocator { bumper, heap_end ); - return Err(Error::AllocatorOutOfSpace) + return Err(Error::AllocatorOutOfSpace); } let res = *bumper; @@ -524,7 +524,7 @@ impl FreeingBumpHeapAllocator { mem: &mut M, ) -> Result<(), Error> { if mem.size() < *last_observed_memory_size { - return Err(Error::MemoryShrinked) + return Err(Error::MemoryShrinked); } *last_observed_memory_size = mem.size(); Ok(()) diff --git 
a/client/api/src/backend.rs b/client/api/src/backend.rs index 79cc0d7a16bcc..226717b54f23c 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -346,7 +346,7 @@ where // this terminates the iterator the first time it fails. if let Some(prefix) = self.prefix { if !next_key.starts_with(&prefix.0[..]) { - return None + return None; } } self.current_key = next_key.clone(); diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index 07a483bc3eaf2..971cef2dadbdf 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -176,8 +176,8 @@ impl ExecutionExtensions { } } - if capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_READ) || - capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_WRITE) + if capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_READ) + || capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_WRITE) { if let Some(offchain_db) = self.offchain_db.as_ref() { extensions.register(OffchainDbExt::new(offchain::LimitedExternalities::new( @@ -210,8 +210,9 @@ impl ExecutionExtensions { ExecutionContext::BlockConstruction => self.strategies.block_construction.get_manager(), ExecutionContext::Syncing => self.strategies.syncing.get_manager(), ExecutionContext::Importing => self.strategies.importing.get_manager(), - ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.is_all() => - self.strategies.offchain_worker.get_manager(), + ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.is_all() => { + self.strategies.offchain_worker.get_manager() + }, ExecutionContext::OffchainCall(_) => self.strategies.other.get_manager(), }; @@ -231,7 +232,7 @@ impl offchain::TransactionPool for TransactionPoolAdapter< Ok(xt) => xt, Err(e) => { log::warn!("Unable to decode extrinsic: {:?}: {}", data, e); - return Err(()) + return Err(()); }, }; diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 
5a3e25ab5987b..93adb1b07f28b 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -198,7 +198,7 @@ impl Blockchain { pub fn equals_to(&self, other: &Self) -> bool { // Check ptr equality first to avoid double read locks. if ptr::eq(self, other) { - return true + return true; } self.canon_equals_to(other) && self.storage.read().blocks == other.storage.read().blocks } @@ -207,14 +207,14 @@ impl Blockchain { pub fn canon_equals_to(&self, other: &Self) -> bool { // Check ptr equality first to avoid double read locks. if ptr::eq(self, other) { - return true + return true; } let this = self.storage.read(); let other = other.storage.read(); - this.hashes == other.hashes && - this.best_hash == other.best_hash && - this.best_number == other.best_number && - this.genesis_hash == other.genesis_hash + this.hashes == other.hashes + && this.best_hash == other.best_hash + && this.best_number == other.best_number + && this.genesis_hash == other.genesis_hash } /// Insert header CHT root. @@ -313,7 +313,7 @@ impl Blockchain { if !stored_justifications.append(justification) { return Err(sp_blockchain::Error::BadJustification( "Duplicate consensus engine ID".into(), - )) + )); } } else { *block_justifications = Some(Justifications::from(justification)); @@ -761,7 +761,7 @@ where fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result { if hash == Default::default() { - return Ok(Self::State::default()) + return Ok(Self::State::default()); } self.states @@ -797,7 +797,7 @@ impl backend::LocalBackend for Backend where Block: /// Check that genesis storage is valid. 
pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { - return Err(sp_blockchain::Error::InvalidState) + return Err(sp_blockchain::Error::InvalidState); } if storage @@ -805,7 +805,7 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { .keys() .any(|child_key| !well_known_keys::is_child_storage_key(child_key)) { - return Err(sp_blockchain::Error::InvalidState) + return Err(sp_blockchain::Error::InvalidState); } Ok(()) diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index cdcb80a110b74..3766e19c51add 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -135,7 +135,7 @@ where let number = Reverse(number); if !self.remove_leaf(&number, &hash) { - return None + return None; } let inserted = parent_hash.and_then(|parent_hash| { @@ -160,7 +160,7 @@ where /// will be pruned soon afterwards anyway. pub fn finalize_height(&mut self, number: N) -> FinalizationOutcome { let boundary = if number == N::zero() { - return FinalizationOutcome { removed: BTreeMap::new() } + return FinalizationOutcome { removed: BTreeMap::new() }; } else { number - N::one() }; @@ -176,7 +176,7 @@ where /// Returns the leaves that would be displaced by finalizing the given block. 
pub fn displaced_by_finalize_height(&self, number: N) -> FinalizationOutcome { let boundary = if number == N::zero() { - return FinalizationOutcome { removed: BTreeMap::new() } + return FinalizationOutcome { removed: BTreeMap::new() }; } else { number - N::one() }; diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 3d21f12f6940b..0ab9738153569 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -66,7 +66,7 @@ pub mod utils { { move |base, hash| { if base == hash { - return Ok(false) + return Ok(false); } let current = current.as_ref().map(|(c, p)| (c.borrow(), p.borrow())); @@ -74,11 +74,11 @@ pub mod utils { let mut hash = hash; if let Some((current_hash, current_parent_hash)) = current { if base == current_hash { - return Ok(false) + return Ok(false); } if hash == current_hash { if base == current_parent_hash { - return Ok(true) + return Ok(true); } else { hash = current_parent_hash; } diff --git a/client/api/src/notifications/registry.rs b/client/api/src/notifications/registry.rs index 882d6ed40be67..721bb67198ad4 100644 --- a/client/api/src/notifications/registry.rs +++ b/client/api/src/notifications/registry.rs @@ -183,7 +183,7 @@ impl Registry { // early exit if no listeners if !has_wildcard && self.listeners.is_empty() && self.child_listeners.is_empty() { - return + return; } let mut subscribers = self.wildcard_listeners.clone(); @@ -229,7 +229,7 @@ impl Registry { // Don't send empty notifications if changes.is_empty() && child_changes.is_empty() { - return + return; } let changes = Arc::<[_]>::from(changes); @@ -299,7 +299,7 @@ impl Registry { None => { wildcards.remove(&subscriber); }, - Some(filters) => + Some(filters) => { for key in filters.iter() { let remove_key = match listeners.get_mut(key) { Some(ref mut set) => { @@ -312,7 +312,8 @@ impl Registry { if remove_key { listeners.remove(key); } - }, + } + }, } } diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 
4121b64e00b9b..92912081e6347 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -307,7 +307,7 @@ where .into_iter() .filter(move |a| { if publish_non_global_ips { - return true + return true; } a.iter().all(|p| match p { @@ -343,7 +343,7 @@ where ).await?.into_iter().map(Into::into).collect::>(); if only_if_changed && keys == self.latest_published_keys { - return Ok(()) + return Ok(()); } let addresses = serialize_addresses(self.addresses_to_publish()); @@ -515,7 +515,7 @@ where .map_err(Error::EncodingDecodingScale)?; if !AuthorityPair::verify(&auth_signature, &record, &authority_id) { - return Err(Error::VerifyingDhtPayload) + return Err(Error::VerifyingDhtPayload); } let addresses: Vec = schema::AuthorityRecord::decode(record.as_slice()) @@ -554,10 +554,10 @@ where let signature = Signature { public_key, bytes: peer_signature.signature }; if !signature.verify(record, &remote_peer_id) { - return Err(Error::VerifyingDhtPayload) + return Err(Error::VerifyingDhtPayload); } } else if self.strict_record_validation { - return Err(Error::MissingPeerIdSignature) + return Err(Error::MissingPeerIdSignature); } else { debug!( target: LOG_TARGET, diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index 19bbbf0b62e7e..f6688504b1026 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -59,7 +59,7 @@ impl AddrCache { addresses, ); - return + return; } else if peer_ids.len() > 1 { log::warn!( target: super::LOG_TARGET, @@ -144,7 +144,7 @@ impl AddrCache { { addresses } else { - continue + continue; }; self.remove_authority_id_from_peer_ids( diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index b69294bf6ccb0..45f2331a24107 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ 
b/client/basic-authorship/src/basic_authorship.rs @@ -367,7 +367,7 @@ where error!( "❌️ Mandatory inherent extrinsic returned error. Block cannot be produced." ); - return Err(ApplyExtrinsicFailed(Validity(e))) + return Err(ApplyExtrinsicFailed(Validity(e))); }, Err(e) => { warn!("❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e); @@ -413,7 +413,7 @@ where let pending_tx = if let Some(pending_tx) = pending_iterator.next() { pending_tx } else { - break EndProposingReason::NoMoreTransactions + break EndProposingReason::NoMoreTransactions; }; let now = (self.now)(); @@ -422,7 +422,7 @@ where "Consensus deadline reached when pushing block transactions, \ proceeding with proposing." ); - break EndProposingReason::HitDeadline + break EndProposingReason::HitDeadline; } let pending_tx_data = pending_tx.data().clone(); @@ -439,17 +439,17 @@ where but will try {} more transactions before quitting.", MAX_SKIPPED_TRANSACTIONS - skipped, ); - continue + continue; } else if now < soft_deadline { debug!( "Transaction would overflow the block size limit, \ but we still have time before the soft deadline, so \ we will try a bit more." 
); - continue + continue; } else { debug!("Reached block size limit, proceeding with proposing."); - break EndProposingReason::HitBlockSizeLimit + break EndProposingReason::HitBlockSizeLimit; } } @@ -474,7 +474,7 @@ where ); } else { debug!("Reached block weight limit, proceeding with proposing."); - break EndProposingReason::HitBlockWeightLimit + break EndProposingReason::HitBlockWeightLimit; } }, Err(e) if skipped > 0 => { @@ -633,7 +633,7 @@ mod tests { let mut value = cell.lock(); if !value.0 { value.0 = true; - return value.1 + return value.1; } let old = value.1; let new = old + time::Duration::from_secs(1); @@ -677,7 +677,7 @@ mod tests { let mut value = cell.lock(); if !value.0 { value.0 = true; - return value.1 + return value.1; } let new = value.1 + time::Duration::from_secs(160); *value = (true, new); @@ -868,13 +868,13 @@ mod tests { .chain((0..extrinsics_num - 1).map(|v| Extrinsic::IncludeData(vec![v as u8; 10]))) .collect::>(); - let block_limit = genesis_header.encoded_size() + - extrinsics + let block_limit = genesis_header.encoded_size() + + extrinsics .iter() .take(extrinsics_num - 1) .map(Encode::encoded_size) - .sum::() + - Vec::::new().encoded_size(); + .sum::() + + Vec::::new().encoded_size(); block_on(txpool.submit_at(&BlockId::number(0), SOURCE, extrinsics)).unwrap(); diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index d29ed433c38db..55767f8c1b3f0 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -234,7 +234,7 @@ mod tests { if response.result != not_ready { assert_eq!(response.result, expected); // Success - return + return; } std::thread::sleep(std::time::Duration::from_millis(50)) } diff --git a/client/beefy/src/communication/gossip.rs b/client/beefy/src/communication/gossip.rs index 520548b943f96..57d921fd9d10d 100644 --- a/client/beefy/src/communication/gossip.rs +++ b/client/beefy/src/communication/gossip.rs @@ -156,18 +156,18 @@ where let known_votes = self.known_votes.read(); if 
!known_votes.is_live(&round) { - return ValidationResult::Discard + return ValidationResult::Discard; } if known_votes.is_known(&round, &msg_hash) { - return ValidationResult::ProcessAndKeep(self.topic) + return ValidationResult::ProcessAndKeep(self.topic); } } if BeefyKeystore::verify(&msg.id, &msg.signature, &msg.commitment.encode()) { self.known_votes.write().add_known(&round, msg_hash); self.known_peers.lock().note_vote_for(*sender, round); - return ValidationResult::ProcessAndKeep(self.topic) + return ValidationResult::ProcessAndKeep(self.topic); } else { // TODO: report peer debug!(target: "beefy", "🥩 Bad signature on message: {:?}, from: {:?}", msg, sender); @@ -211,7 +211,7 @@ where let known_votes = self.known_votes.read(); Box::new(move |_who, intent, _topic, mut data| { if let MessageIntent::PeriodicRebroadcast = intent { - return do_rebroadcast + return do_rebroadcast; } let msg = match VoteMessage::, Public, Signature>::decode(&mut data) { diff --git a/client/beefy/src/communication/request_response/incoming_requests_handler.rs b/client/beefy/src/communication/request_response/incoming_requests_handler.rs index 9f02b7162b54c..d3ff1379ba656 100644 --- a/client/beefy/src/communication/request_response/incoming_requests_handler.rs +++ b/client/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -76,9 +76,9 @@ impl IncomingRequest { sent_feedback: None, }; if let Err(_) = pending_response.send(response) { - return Err(Error::DecodingErrorNoReputationChange(peer, err)) + return Err(Error::DecodingErrorNoReputationChange(peer, err)); } - return Err(Error::DecodingError(peer, err)) + return Err(Error::DecodingError(peer, err)); }, }; Ok(Self::new(peer, payload, pending_response)) diff --git a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs index c4d3c926190e6..3670d8f1da2a1 100644 --- 
a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs +++ b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -94,7 +94,7 @@ where let live = self.live_peers.lock(); while let Some(peer) = self.peers_cache.pop_front() { if live.contains(&peer) { - return Some(peer) + return Some(peer); } } None @@ -122,7 +122,7 @@ where pub fn request(&mut self, block: NumberFor) { // ignore new requests while there's already one pending if matches!(self.state, State::AwaitingResponse(_, _, _)) { - return + return; } self.reset_peers_cache_for_block(block); @@ -193,7 +193,7 @@ where State::Idle => { futures::pending!(); // Doesn't happen as 'futures::pending!()' is an 'await' barrier that never passes. - return None + return None; }, State::AwaitingResponse(peer, block, receiver) => { let resp = receiver.await; diff --git a/client/beefy/src/justification.rs b/client/beefy/src/justification.rs index 7243c692727f0..5ddd75f3bee1b 100644 --- a/client/beefy/src/justification.rs +++ b/client/beefy/src/justification.rs @@ -48,11 +48,11 @@ fn verify_with_validator_set( ) -> Result<(), ConsensusError> { match proof { VersionedFinalityProof::V1(signed_commitment) => { - if signed_commitment.signatures.len() != validator_set.len() || - signed_commitment.commitment.validator_set_id != validator_set.id() || - signed_commitment.commitment.block_number != target_number + if signed_commitment.signatures.len() != validator_set.len() + || signed_commitment.commitment.validator_set_id != validator_set.id() + || signed_commitment.commitment.block_number != target_number { - return Err(ConsensusError::InvalidJustification) + return Err(ConsensusError::InvalidJustification); } // Arrangement of signatures in the commitment should be in the same order diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs index 45d346ccd85eb..0ccef98753b3d 100644 --- a/client/beefy/src/round.rs +++ b/client/beefy/src/round.rs @@ -42,7 +42,7 @@ struct 
RoundTracker { impl RoundTracker { fn add_vote(&mut self, vote: (Public, Signature), self_vote: bool) -> bool { if self.votes.contains_key(&vote.0) { - return false + return false; } self.self_vote = self.self_vote || self_vote; @@ -110,8 +110,8 @@ where } pub(crate) fn should_self_vote(&self, round: &(P, NumberFor)) -> bool { - Some(round.1) > self.best_done && - self.rounds.get(round).map(|tracker| !tracker.has_self_vote()).unwrap_or(true) + Some(round.1) > self.best_done + && self.rounds.get(round).map(|tracker| !tracker.has_self_vote()).unwrap_or(true) } pub(crate) fn add_vote( diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 9c14128624518..efc5b6e63714e 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -177,7 +177,7 @@ impl VoterOracle { r } else { debug!(target: "beefy", "🥩 No voting round started"); - return None + return None; }; // `target` is guaranteed > `best_beefy` since `min_block_delta` is at least `1`. @@ -621,13 +621,13 @@ where hash } else { warn!(target: "beefy", "🥩 No MMR root digest found for: {:?}", target_hash); - return Ok(()) + return Ok(()); }; let rounds = self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?; if !rounds.should_self_vote(&(payload.clone(), target_number)) { debug!(target: "beefy", "🥩 Don't double vote for block number: {:?}", target_number); - return Ok(()) + return Ok(()); } let (validators, validator_set_id) = (rounds.validators(), rounds.validator_set_id()); @@ -636,7 +636,7 @@ where id } else { debug!(target: "beefy", "🥩 Missing validator id - can't vote for: {:?}", target_hash); - return Ok(()) + return Ok(()); }; let commitment = Commitment { payload, block_number: target_number, validator_set_id }; @@ -646,7 +646,7 @@ where Ok(sig) => sig, Err(err) => { warn!(target: "beefy", "🥩 Error signing commitment: {:?}", err); - return Ok(()) + return Ok(()); }, }; @@ -691,7 +691,7 @@ where rounds.session_start(), rounds.validator_set_id(), ); - return + return; } 
self.best_grandpa_block_header = header.clone(); @@ -725,7 +725,7 @@ where round.conclude(*header.number()); } self.best_beefy_block = Some(*header.number()); - break + break; } if let Some(validator_set) = find_authorities_change::(&header) { @@ -735,7 +735,7 @@ where *header.number() ); self.init_session_at(validator_set, *header.number()); - break + break; } // Move up the chain. diff --git a/client/chain-spec/derive/src/impls.rs b/client/chain-spec/derive/src/impls.rs index 7af403d46ad10..083e87f092961 100644 --- a/client/chain-spec/derive/src/impls.rs +++ b/client/chain-spec/derive/src/impls.rs @@ -97,7 +97,7 @@ pub fn group_derive(ast: &DeriveInput) -> proc_macro::TokenStream { Error::new(Span::call_site(), &format!("Could not find `serde` crate: {}", e)) .to_compile_error(); - return quote!( #err ) + return quote!( #err ); }, }; @@ -176,7 +176,7 @@ pub fn derive( Ok(FoundCrate::Name(chain_spec_name)) => chain_spec_name, Err(e) => { let err = Error::new(Span::call_site(), &e).to_compile_error(); - return quote!( #err ).into() + return quote!( #err ).into(); }, }; let crate_name = Ident::new(&crate_name, Span::call_site()); diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index d761c854a6f0d..e774e7292d67e 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -124,16 +124,21 @@ pub fn execution_method_from_cli( #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled => sc_service::config::WasmExecutionMethod::Compiled { instantiation_strategy: match _instantiation_strategy { - WasmtimeInstantiationStrategy::PoolingCopyOnWrite => - sc_service::config::WasmtimeInstantiationStrategy::PoolingCopyOnWrite, - WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite => - sc_service::config::WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite, - WasmtimeInstantiationStrategy::Pooling => - sc_service::config::WasmtimeInstantiationStrategy::Pooling, - WasmtimeInstantiationStrategy::RecreateInstance => - 
sc_service::config::WasmtimeInstantiationStrategy::RecreateInstance, - WasmtimeInstantiationStrategy::LegacyInstanceReuse => - sc_service::config::WasmtimeInstantiationStrategy::LegacyInstanceReuse, + WasmtimeInstantiationStrategy::PoolingCopyOnWrite => { + sc_service::config::WasmtimeInstantiationStrategy::PoolingCopyOnWrite + }, + WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite => { + sc_service::config::WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite + }, + WasmtimeInstantiationStrategy::Pooling => { + sc_service::config::WasmtimeInstantiationStrategy::Pooling + }, + WasmtimeInstantiationStrategy::RecreateInstance => { + sc_service::config::WasmtimeInstantiationStrategy::RecreateInstance + }, + WasmtimeInstantiationStrategy::LegacyInstanceReuse => { + sc_service::config::WasmtimeInstantiationStrategy::LegacyInstanceReuse + }, }, }, #[cfg(not(feature = "wasmtime"))] @@ -307,10 +312,12 @@ impl Into for SyncMode { fn into(self) -> sc_network::config::SyncMode { match self { SyncMode::Full => sc_network::config::SyncMode::Full, - SyncMode::Fast => - sc_network::config::SyncMode::Fast { skip_proofs: false, storage_chain_mode: false }, - SyncMode::FastUnsafe => - sc_network::config::SyncMode::Fast { skip_proofs: true, storage_chain_mode: false }, + SyncMode::Fast => { + sc_network::config::SyncMode::Fast { skip_proofs: false, storage_chain_mode: false } + }, + SyncMode::FastUnsafe => { + sc_network::config::SyncMode::Fast { skip_proofs: true, storage_chain_mode: false } + }, SyncMode::Warp => sc_network::config::SyncMode::Warp, } } diff --git a/client/cli/src/commands/purge_chain_cmd.rs b/client/cli/src/commands/purge_chain_cmd.rs index 9a3aeee50e944..b0bf81ee5d3b7 100644 --- a/client/cli/src/commands/purge_chain_cmd.rs +++ b/client/cli/src/commands/purge_chain_cmd.rs @@ -64,7 +64,7 @@ impl PurgeChainCmd { Some('y') | Some('Y') => {}, _ => { println!("Aborted"); - return Ok(()) + return Ok(()); }, } } diff --git 
a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 35181d83f805f..093779f66dc0c 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -499,19 +499,19 @@ impl CliConfiguration for RunCmd { pub fn is_node_name_valid(_name: &str) -> std::result::Result<(), &str> { let name = _name.to_string(); if name.chars().count() >= crate::NODE_NAME_MAX_LENGTH { - return Err("Node name too long") + return Err("Node name too long"); } let invalid_chars = r"[\\.@]"; let re = Regex::new(invalid_chars).unwrap(); if re.is_match(&name) { - return Err("Node name should not contain invalid chars such as '.' and '@'") + return Err("Node name should not contain invalid chars such as '.' and '@'"); } let invalid_patterns = r"(https?:\\/+)?(www)+"; let re = Regex::new(invalid_patterns).unwrap(); if re.is_match(&name) { - return Err("Node name should not contain urls") + return Err("Node name should not contain urls"); } Ok(()) @@ -529,7 +529,7 @@ fn rpc_interface( a validator. Use `--unsafe-rpc-external` or `--rpc-methods=unsafe` if you understand \ the risks. See the options description for more information." 
.to_owned(), - )) + )); } if is_external || is_unsafe_external { @@ -604,7 +604,7 @@ fn parse_cors(s: &str) -> Result { match part { "all" | "*" => { is_all = true; - break + break; }, other => origins.push(other.to_owned()), } diff --git a/client/cli/src/commands/vanity.rs b/client/cli/src/commands/vanity.rs index ae0007ac7964d..ea3a89eda18b1 100644 --- a/client/cli/src/commands/vanity.rs +++ b/client/cli/src/commands/vanity.rs @@ -103,7 +103,7 @@ where best = score; if best >= top { println!("best: {} == top: {}", best, top); - return Ok(utils::format_seed::(seed.clone())) + return Ok(utils::format_seed::(seed.clone())); } } done += 1; @@ -131,7 +131,7 @@ fn next_seed(seed: &mut [u8]) { }, _ => { *s += 1; - break + break; }, } } @@ -144,7 +144,7 @@ fn calculate_score(_desired: &str, key: &str) -> usize { let snip_size = _desired.len() - truncate; let truncated = &_desired[0..snip_size]; if let Some(pos) = key.find(truncated) { - return (47 - pos) + (snip_size * 48) + return (47 - pos) + (snip_size * 48); } } 0 diff --git a/client/cli/src/commands/verify.rs b/client/cli/src/commands/verify.rs index 82554fbf268fa..8f23eb03fc62c 100644 --- a/client/cli/src/commands/verify.rs +++ b/client/cli/src/commands/verify.rs @@ -81,7 +81,7 @@ where if Pair::verify(&signature, &message, &pubkey) { println!("Signature verifies correctly."); } else { - return Err(error::Error::SignatureInvalid) + return Err(error::Error::SignatureInvalid); } Ok(()) diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 77689708a231f..c3249b09ec16e 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -672,7 +672,7 @@ pub fn generate_node_name() -> String { let count = node_name.chars().count(); if count < NODE_NAME_MAX_LENGTH { - return node_name + return node_name; } } } diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 5580dea45bde6..d65193fe56ed7 100644 --- a/client/cli/src/params/network_params.rs +++ 
b/client/cli/src/params/network_params.rs @@ -197,15 +197,16 @@ impl NetworkParams { // Activate if the user explicitly requested local discovery, `--dev` is given or the // chain type is `Local`/`Development` let allow_non_globals_in_dht = - self.discover_local || - is_dev || matches!(chain_type, ChainType::Local | ChainType::Development); + self.discover_local + || is_dev || matches!(chain_type, ChainType::Local | ChainType::Development); let allow_private_ipv4 = match (self.allow_private_ipv4, self.no_private_ipv4) { (true, true) => unreachable!("`*_private_ipv4` flags are mutually exclusive; qed"), (true, false) => true, (false, true) => false, - (false, false) => - is_dev || matches!(chain_type, ChainType::Local | ChainType::Development), + (false, false) => { + is_dev || matches!(chain_type, ChainType::Local | ChainType::Development) + }, }; NetworkConfiguration { diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs index 2346455c26a37..75692f7e2a908 100644 --- a/client/cli/src/params/node_key_params.rs +++ b/client/cli/src/params/node_key_params.rs @@ -142,7 +142,9 @@ mod tests { params.node_key(net_config_dir).and_then(|c| match c { NodeKeyConfig::Ed25519(sc_network::config::Secret::Input(ref ski)) if node_key_type == NodeKeyType::Ed25519 && &sk[..] 
== ski.as_ref() => - Ok(()), + { + Ok(()) + }, _ => Err(error::Error::Input("Unexpected node key config".into())), }) }) @@ -202,7 +204,9 @@ mod tests { params.node_key(net_config_dir).and_then(move |c| match c { NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) if typ == NodeKeyType::Ed25519 && f == &dir.join(NODE_KEY_ED25519_FILE) => - Ok(()), + { + Ok(()) + }, _ => Err(error::Error::Input("Unexpected node key config".into())), }) }) diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index 6c03ac2c4ec23..1c37b40bab610 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -100,12 +100,13 @@ impl SharedParams { pub fn chain_id(&self, is_dev: bool) -> String { match self.chain { Some(ref chain) => chain.clone(), - None => + None => { if is_dev { "dev".into() } else { "".into() - }, + } + }, } } diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index b17feae45897e..76786074d6f15 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -231,8 +231,9 @@ where // skip the inherents verification if the runtime API is old or not expected to // exist. - if !block.state_action.skip_execution_checks() && - self.client + if !block.state_action.skip_execution_checks() + && self + .client .runtime_api() .has_api_with::, _>( &BlockId::Hash(parent_hash), diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 50a02726cf56a..d2fa20ed3543d 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -126,7 +126,7 @@ where /// Get slot author for given block along with authorities. fn slot_author(slot: Slot, authorities: &[AuthorityId

]) -> Option<&AuthorityId

> { if authorities.is_empty() { - return None + return None; } let idx = *slot % (authorities.len() as u64); @@ -490,7 +490,7 @@ where self.client.info().finalized_number, slot, self.logging_target(), - ) + ); } } false @@ -575,7 +575,7 @@ impl From> for String { /// Get pre-digests from the header pub fn find_pre_digest(header: &B::Header) -> Result> { if header.number().is_zero() { - return Ok(0.into()) + return Ok(0.into()); } let mut pre_digest: Option = None; @@ -607,7 +607,7 @@ where match compatibility_mode { CompatibilityMode::None => {}, // Use `initialize_block` until we hit the block that should disable the mode. - CompatibilityMode::UseInitializeBlock { until } => + CompatibilityMode::UseInitializeBlock { until } => { if *until > context_block_number { runtime_api .initialize_block( @@ -621,7 +621,8 @@ where ), ) .map_err(|_| sp_consensus::Error::InvalidAuthoritiesSet)?; - }, + } + }, } runtime_api diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index b39153faa6d1a..f9617807db626 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -46,13 +46,13 @@ pub(super) fn calculate_primary_threshold( // configuration parameters, this is not sufficient to guarantee that `c.1` is non-zero // (i.e. third party implementations are possible). 
if c.1 == 0 || authority_index >= authorities.len() { - return 0 + return 0; } let c = c.0 as f64 / c.1 as f64; - let theta = authorities[authority_index].1 as f64 / - authorities.iter().map(|(_, weight)| weight).sum::() as f64; + let theta = authorities[authority_index].1 as f64 + / authorities.iter().map(|(_, weight)| weight).sum::() as f64; assert!(theta > 0.0, "authority with weight 0."); @@ -109,7 +109,7 @@ pub(super) fn secondary_slot_author( randomness: [u8; 32], ) -> Option<&AuthorityId> { if authorities.is_empty() { - return None + return None; } let rand = U256::from((randomness, slot).using_encoded(blake2_256)); @@ -138,7 +138,7 @@ fn claim_secondary_slot( let Epoch { authorities, randomness, epoch_index, .. } = epoch; if authorities.is_empty() { - return None + return None; } let expected_author = secondary_slot_author(slot, authorities, *randomness)?; @@ -176,7 +176,7 @@ fn claim_secondary_slot( }; if let Some(pre_digest) = pre_digest { - return Some((pre_digest, authority_id.clone())) + return Some((pre_digest, authority_id.clone())); } } } @@ -211,8 +211,8 @@ pub fn claim_slot_using_keys( keys: &[(AuthorityId, usize)], ) -> Option<(PreDigest, AuthorityId)> { claim_primary_slot(slot, epoch, epoch.config.c, keystore, keys).or_else(|| { - if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() || - epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() + if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() + || epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() { claim_secondary_slot( slot, @@ -265,7 +265,7 @@ fn claim_primary_slot( authority_index: *authority_index as u32, }); - return Some((pre_digest, authority_id.clone())) + return Some((pre_digest, authority_id.clone())); } } } diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index fef84bda86974..8a4aa2a2c9bbb 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -62,21 
+62,25 @@ pub fn load_epoch_changes( let version = load_decode::<_, u32>(backend, BABE_EPOCH_CHANGES_VERSION)?; let maybe_epoch_changes = match version { - None => + None => { load_decode::<_, EpochChangesV0For>(backend, BABE_EPOCH_CHANGES_KEY)? - .map(|v0| v0.migrate().map(|_, _, epoch| epoch.migrate(config))), - Some(1) => + .map(|v0| v0.migrate().map(|_, _, epoch| epoch.migrate(config))) + }, + Some(1) => { load_decode::<_, EpochChangesV1For>(backend, BABE_EPOCH_CHANGES_KEY)? - .map(|v1| v1.migrate().map(|_, _, epoch| epoch.migrate(config))), + .map(|v1| v1.migrate().map(|_, _, epoch| epoch.migrate(config))) + }, Some(2) => { // v2 still uses `EpochChanges` v1 format but with a different `Epoch` type. load_decode::<_, EpochChangesV1For>(backend, BABE_EPOCH_CHANGES_KEY)? .map(|v2| v2.migrate()) }, - Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => - load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES_KEY)?, - Some(other) => - return Err(ClientError::Backend(format!("Unsupported BABE DB version: {:?}", other))), + Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => { + load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES_KEY)? 
+ }, + Some(other) => { + return Err(ClientError::Backend(format!("Unsupported BABE DB version: {:?}", other))) + }, }; let epoch_changes = @@ -199,8 +203,8 @@ mod test { .tree() .iter() .map(|(_, _, epoch)| epoch.clone()) - .collect::>() == - vec![PersistedEpochHeader::Regular(EpochHeader { + .collect::>() + == vec![PersistedEpochHeader::Regular(EpochHeader { start_slot: 0.into(), end_slot: 100.into(), })], diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 109e5aade02a7..1aa3bf27de484 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -359,10 +359,11 @@ where } }, Some(2) => runtime_api.configuration(&block_id)?, - _ => + _ => { return Err(sp_blockchain::Error::VersionInvalid( "Unsupported or invalid BabeApi version".to_string(), - )), + )) + }, }; Ok(config) } @@ -561,7 +562,7 @@ fn aux_storage_cleanup + HeaderBackend, Block: B .flatten() .map_or(true, |h| h == hash) { - break + break; } }, Err(err) => { @@ -571,7 +572,7 @@ fn aux_storage_cleanup + HeaderBackend, Block: B hash, err, ); - break + break; }, } } @@ -798,13 +799,14 @@ where let sinks = &mut self.slot_notification_sinks.lock(); sinks.retain_mut(|sink| match sink.try_send((slot, epoch_descriptor.clone())) { Ok(()) => true, - Err(e) => + Err(e) => { if e.is_full() { warn!(target: "babe", "Trying to notify a slot but the channel is full"); true } else { false - }, + } + }, }); } @@ -873,7 +875,7 @@ where self.client.info().finalized_number, slot, self.logging_target(), - ) + ); } } false @@ -922,7 +924,7 @@ pub fn find_pre_digest(header: &B::Header) -> Result = None; @@ -946,8 +948,9 @@ fn find_next_epoch_digest( trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); match (log, epoch_digest.is_some()) { - (Some(ConsensusLog::NextEpochData(_)), true) => - return Err(babe_err(Error::MultipleEpochChangeDigests)), + 
(Some(ConsensusLog::NextEpochData(_)), true) => { + return Err(babe_err(Error::MultipleEpochChangeDigests)) + }, (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), _ => trace!(target: "babe", "Ignoring digest not meant for us"), } @@ -965,8 +968,9 @@ fn find_next_config_digest( trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); match (log, config_digest.is_some()) { - (Some(ConsensusLog::NextConfigData(_)), true) => - return Err(babe_err(Error::MultipleConfigChangeDigests)), + (Some(ConsensusLog::NextConfigData(_)), true) => { + return Err(babe_err(Error::MultipleConfigChangeDigests)) + }, (Some(ConsensusLog::NextConfigData(config)), false) => config_digest = Some(config), _ => trace!(target: "babe", "Ignoring digest not meant for us"), } @@ -1049,7 +1053,7 @@ where // don't report any equivocations during initial sync // as they are most likely stale. if *origin == BlockOrigin::NetworkInitialSync { - return Ok(()) + return Ok(()); } // check if authorship of this header is an equivocation and return a proof if so. @@ -1099,7 +1103,7 @@ where Some(proof) => proof, None => { debug!(target: "babe", "Equivocation offender is not part of the authority set."); - return Ok(()) + return Ok(()); }, }, }; @@ -1160,7 +1164,7 @@ where // read it from the state after import. We also skip all verifications // because there's no parent state and we trust the sync module to verify // that the state is correct and finalized. 
- return Ok((block, Default::default())) + return Ok((block, Default::default())); } debug!(target: "babe", "We have {:?} logs in this header", block.header.digest().logs().len()); @@ -1361,11 +1365,12 @@ where let import_result = self.inner.import_block(block, new_cache).await; let aux = match import_result { Ok(ImportResult::Imported(aux)) => aux, - Ok(r) => + Ok(r) => { return Err(ConsensusError::ClientImport(format!( "Unexpected import result: {:?}", r - ))), + ))) + }, Err(r) => return Err(r.into()), }; @@ -1421,14 +1426,14 @@ where // When re-importing existing block strip away intermediates. let _ = block.remove_intermediate::>(INTERMEDIATE_KEY); block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); - return self.inner.import_block(block, new_cache).await.map_err(Into::into) + return self.inner.import_block(block, new_cache).await.map_err(Into::into); }, Ok(sp_blockchain::BlockStatus::Unknown) => {}, Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), } if block.with_state() { - return self.import_state(block, new_cache).await + return self.import_state(block, new_cache).await; } let pre_digest = find_pre_digest::(&block.header).expect( @@ -1456,7 +1461,7 @@ where if slot <= parent_slot { return Err(ConsensusError::ClientImport( babe_err(Error::::SlotMustIncrease(parent_slot, slot)).into(), - )) + )); } // if there's a pending epoch we'll save the previous epoch changes here @@ -1506,18 +1511,21 @@ where match (first_in_epoch, next_epoch_digest.is_some(), next_config_digest.is_some()) { (true, true, _) => {}, (false, false, false) => {}, - (false, false, true) => + (false, false, true) => { return Err(ConsensusError::ClientImport( babe_err(Error::::UnexpectedConfigChange).into(), - )), - (true, false, _) => + )) + }, + (true, false, _) => { return Err(ConsensusError::ClientImport( babe_err(Error::::ExpectedEpochChange(hash, slot)).into(), - )), - (false, true, _) => + )) + }, + (false, true, _) => { return 
Err(ConsensusError::ClientImport( babe_err(Error::::UnexpectedEpochChange).into(), - )), + )) + }, } let info = self.client.info(); @@ -1590,7 +1598,7 @@ where debug!(target: "babe", "Failed to launch next epoch: {}", e); *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); - return Err(e) + return Err(e); } crate::aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { @@ -1814,7 +1822,7 @@ where let revertible = blocks.min(best_number - finalized); if revertible == Zero::zero() { - return Ok(()) + return Ok(()); } let revert_up_to_number = best_number - revertible; @@ -1850,11 +1858,11 @@ where let mut hash = leaf; loop { let meta = client.header_metadata(hash)?; - if meta.number <= revert_up_to_number || - !weight_keys.insert(aux_schema::block_weight_key(hash)) + if meta.number <= revert_up_to_number + || !weight_keys.insert(aux_schema::block_weight_key(hash)) { // We've reached the revert point or an already processed branch, stop here. - break + break; } hash = meta.parent; } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 8bef1b38b929d..64a9520a77321 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -582,7 +582,7 @@ fn can_author_block() { None => i += 1, Some(s) => { debug!(target: "babe", "Authored block {:?}", s.0); - break + break; }, } } diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index 53ec3002e6a85..3592a18e7b548 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -83,7 +83,7 @@ pub(super) fn check_header( if pre_digest.slot() > slot_now { header.digest_mut().push(seal); - return Ok(CheckedHeader::Deferred(header, pre_digest.slot())) + return Ok(CheckedHeader::Deferred(header, pre_digest.slot())); } let author = match authorities.get(pre_digest.authority_index() as usize) { @@ -168,7 +168,7 @@ fn check_primary_header( 
calculate_primary_threshold(c, &epoch.authorities, pre_digest.authority_index as usize); if !check_primary_threshold(&inout, threshold) { - return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))) + return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))); } Ok(()) @@ -196,7 +196,7 @@ fn check_secondary_plain_header( let author = &epoch.authorities[pre_digest.authority_index as usize].0; if expected_author != author { - return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())) + return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); } if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { @@ -222,7 +222,7 @@ fn check_secondary_vrf_header( let author = &epoch.authorities[pre_digest.authority_index as usize].0; if expected_author != author { - return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())) + return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); } if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs index f888176addd2d..dc9908ad3df34 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -157,9 +157,9 @@ impl StateAction { /// Check if execution checks that require runtime calls should be skipped. pub fn skip_execution_checks(&self) -> bool { match self { - StateAction::ApplyChanges(_) | - StateAction::Execute | - StateAction::ExecuteIfPossible => false, + StateAction::ApplyChanges(_) + | StateAction::Execute + | StateAction::ExecuteIfPossible => false, StateAction::Skip => true, } } @@ -270,8 +270,9 @@ impl BlockImportParams { ) -> BlockImportParams { // Preserve imported state. 
let state_action = match self.state_action { - StateAction::ApplyChanges(StorageChanges::Import(state)) => - StateAction::ApplyChanges(StorageChanges::Import(state)), + StateAction::ApplyChanges(StorageChanges::Import(state)) => { + StateAction::ApplyChanges(StorageChanges::Import(state)) + }, StateAction::ApplyChanges(StorageChanges::Changes(_)) => StateAction::Skip, StateAction::Execute => StateAction::Execute, StateAction::ExecuteIfPossible => StateAction::ExecuteIfPossible, diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs index 3741fa99663cd..36e1dc79fc72e 100644 --- a/client/consensus/common/src/import_queue.rs +++ b/client/consensus/common/src/import_queue.rs @@ -164,8 +164,8 @@ impl BlockImportStatus { /// Returns the imported block number. pub fn number(&self) -> &N { match self { - BlockImportStatus::ImportedKnown(n, _) | - BlockImportStatus::ImportedUnknown(n, _, _) => n, + BlockImportStatus::ImportedKnown(n, _) + | BlockImportStatus::ImportedUnknown(n, _, _) => n, } } } @@ -236,7 +236,7 @@ pub(crate) async fn import_single_block_metered< } else { debug!(target: "sync", "Header {} was not provided ", block.hash); } - return Err(BlockImportError::IncompleteHeader(peer)) + return Err(BlockImportError::IncompleteHeader(peer)); }, }; @@ -251,8 +251,9 @@ pub(crate) async fn import_single_block_metered< trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); Ok(BlockImportStatus::ImportedKnown(number, peer)) }, - Ok(ImportResult::Imported(aux)) => - Ok(BlockImportStatus::ImportedUnknown(number, aux, peer)), + Ok(ImportResult::Imported(aux)) => { + Ok(BlockImportStatus::ImportedUnknown(number, aux, peer)) + }, Ok(ImportResult::MissingState) => { debug!(target: "sync", "Parent state is missing for {}: {:?}, parent: {:?}", number, hash, parent_hash); diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs index 
0e607159b75c3..7bf591a1f174e 100644 --- a/client/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -102,7 +102,7 @@ impl BasicQueue { impl ImportQueue for BasicQueue { fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { if blocks.is_empty() { - return + return; } trace!(target: "sync", "Scheduling {} blocks for import", blocks.len()); @@ -183,7 +183,7 @@ async fn block_import_process( target: "block-import", "Stopping block import because the import channel was closed!", ); - return + return; }, }; @@ -257,26 +257,27 @@ impl BlockImportWorker { target: "block-import", "Stopping block import because result channel was closed!", ); - return + return; } // Make sure to first process all justifications while let Poll::Ready(justification) = futures::poll!(justification_port.next()) { match justification { - Some(ImportJustification(who, hash, number, justification)) => - worker.import_justification(who, hash, number, justification).await, + Some(ImportJustification(who, hash, number, justification)) => { + worker.import_justification(who, hash, number, justification).await + }, None => { log::debug!( target: "block-import", "Stopping block import because justification channel was closed!", ); - return + return; }, } } if let Poll::Ready(()) = futures::poll!(&mut block_import_process) { - return + return; } // All futures that we polled are now pending. @@ -370,7 +371,7 @@ async fn import_many_blocks, Transaction: Send + 'stat Some(b) => b, None => { // No block left to import, success! 
- return ImportManyBlocksResult { block_count: count, imported, results } + return ImportManyBlocksResult { block_count: count, imported, results }; }, }; diff --git a/client/consensus/common/src/import_queue/buffered_link.rs b/client/consensus/common/src/import_queue/buffered_link.rs index 5d418dddf0853..94cd12ef7d3b0 100644 --- a/client/consensus/common/src/import_queue/buffered_link.rs +++ b/client/consensus/common/src/import_queue/buffered_link.rs @@ -139,12 +139,15 @@ impl BufferedLinkReceiver { }; match msg { - BlockImportWorkerMsg::BlocksProcessed(imported, count, results) => - link.blocks_processed(imported, count, results), - BlockImportWorkerMsg::JustificationImported(who, hash, number, success) => - link.justification_imported(who, &hash, number, success), - BlockImportWorkerMsg::RequestJustification(hash, number) => - link.request_justification(&hash, number), + BlockImportWorkerMsg::BlocksProcessed(imported, count, results) => { + link.blocks_processed(imported, count, results) + }, + BlockImportWorkerMsg::JustificationImported(who, hash, number, success) => { + link.justification_imported(who, &hash, number, success) + }, + BlockImportWorkerMsg::RequestJustification(hash, number) => { + link.request_justification(&hash, number) + }, } } } diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index f8b6253ef2353..97dbbbf497314 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -199,8 +199,9 @@ where pub fn increment(&self, next_descriptor: E::NextEpochDescriptor) -> IncrementedEpoch { let next = self.as_ref().increment(next_descriptor); let to_persist = match *self { - ViableEpoch::UnimportedGenesis(ref epoch_0) => - PersistedEpoch::Genesis(epoch_0.clone(), next), + ViableEpoch::UnimportedGenesis(ref epoch_0) => { + PersistedEpoch::Genesis(epoch_0.clone(), next) + }, ViableEpoch::Signaled(_) => PersistedEpoch::Regular(next), }; @@ -246,8 +247,9 @@ impl PersistedEpoch { impl<'a, E: 
Epoch> From<&'a PersistedEpoch> for PersistedEpochHeader { fn from(epoch: &'a PersistedEpoch) -> Self { match epoch { - PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => - PersistedEpochHeader::Genesis(epoch_0.into(), epoch_1.into()), + PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => { + PersistedEpochHeader::Genesis(epoch_0.into(), epoch_1.into()) + }, PersistedEpoch::Regular(ref epoch_n) => PersistedEpochHeader::Regular(epoch_n.into()), } } @@ -261,8 +263,9 @@ impl PersistedEpoch { F: FnMut(&Hash, &Number, E) -> B, { match self { - PersistedEpoch::Genesis(epoch_0, epoch_1) => - PersistedEpoch::Genesis(f(h, n, epoch_0), f(h, n, epoch_1)), + PersistedEpoch::Genesis(epoch_0, epoch_1) => { + PersistedEpoch::Genesis(f(h, n, epoch_0), f(h, n, epoch_1)) + }, PersistedEpoch::Regular(epoch_n) => PersistedEpoch::Regular(f(h, n, epoch_n)), } } @@ -347,33 +350,40 @@ where match &self.current { (_, _, PersistedEpoch::Genesis(epoch_0, _)) if slot >= epoch_0.start_slot() && slot < epoch_0.end_slot() => + { return Some(( self.current.0, self.current.1, epoch_0.into(), EpochIdentifierPosition::Genesis0, - )), + )) + }, (_, _, PersistedEpoch::Genesis(_, epoch_1)) if slot >= epoch_1.start_slot() && slot < epoch_1.end_slot() => + { return Some(( self.current.0, self.current.1, epoch_1.into(), EpochIdentifierPosition::Genesis1, - )), + )) + }, (_, _, PersistedEpoch::Regular(epoch_n)) if slot >= epoch_n.start_slot() && slot < epoch_n.end_slot() => + { return Some(( self.current.0, self.current.1, epoch_n.into(), EpochIdentifierPosition::Regular, - )), + )) + }, _ => {}, }; match &self.next { - Some((h, n, epoch_n)) if slot >= epoch_n.start_slot() && slot < epoch_n.end_slot() => - Some((*h, *n, epoch_n.into(), EpochIdentifierPosition::Regular)), + Some((h, n, epoch_n)) if slot >= epoch_n.start_slot() && slot < epoch_n.end_slot() => { + Some((*h, *n, epoch_n.into(), EpochIdentifierPosition::Regular)) + }, _ => None, } } @@ -384,19 +394,27 @@ where ((h, n, e), _) if h == &id.hash 
&& n == &id.number => match e { PersistedEpoch::Genesis(ref epoch_0, _) if id.position == EpochIdentifierPosition::Genesis0 => - Some(epoch_0), + { + Some(epoch_0) + }, PersistedEpoch::Genesis(_, ref epoch_1) if id.position == EpochIdentifierPosition::Genesis1 => - Some(epoch_1), + { + Some(epoch_1) + }, PersistedEpoch::Regular(ref epoch_n) if id.position == EpochIdentifierPosition::Regular => - Some(epoch_n), + { + Some(epoch_n) + }, _ => None, }, (_, Some((h, n, e))) - if h == &id.hash && - n == &id.number && id.position == EpochIdentifierPosition::Regular => - Some(e), + if h == &id.hash + && n == &id.number && id.position == EpochIdentifierPosition::Regular => + { + Some(e) + }, _ => None, } } @@ -537,18 +555,24 @@ where /// Get a reference to an epoch with given identifier. pub fn epoch(&self, id: &EpochIdentifier) -> Option<&E> { if let Some(e) = &self.gap.as_ref().and_then(|gap| gap.epoch(id)) { - return Some(e) + return Some(e); } self.epochs.get(&(id.hash, id.number)).and_then(|v| match v { PersistedEpoch::Genesis(ref epoch_0, _) if id.position == EpochIdentifierPosition::Genesis0 => - Some(epoch_0), + { + Some(epoch_0) + }, PersistedEpoch::Genesis(_, ref epoch_1) if id.position == EpochIdentifierPosition::Genesis1 => - Some(epoch_1), + { + Some(epoch_1) + }, PersistedEpoch::Regular(ref epoch_n) if id.position == EpochIdentifierPosition::Regular => - Some(epoch_n), + { + Some(epoch_n) + }, _ => None, }) } @@ -563,10 +587,12 @@ where G: FnOnce(E::Slot) -> E, { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot) => - Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))), - ViableEpochDescriptor::Signaled(identifier, _) => - self.epoch(identifier).map(ViableEpoch::Signaled), + ViableEpochDescriptor::UnimportedGenesis(slot) => { + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))) + }, + ViableEpochDescriptor::Signaled(identifier, _) => { + self.epoch(identifier).map(ViableEpoch::Signaled) + }, } } @@ -575,13 +601,19 @@ where 
self.epochs.get_mut(&(id.hash, id.number)).and_then(|v| match v { PersistedEpoch::Genesis(ref mut epoch_0, _) if id.position == EpochIdentifierPosition::Genesis0 => - Some(epoch_0), + { + Some(epoch_0) + }, PersistedEpoch::Genesis(_, ref mut epoch_1) if id.position == EpochIdentifierPosition::Genesis1 => - Some(epoch_1), + { + Some(epoch_1) + }, PersistedEpoch::Regular(ref mut epoch_n) if id.position == EpochIdentifierPosition::Regular => - Some(epoch_n), + { + Some(epoch_n) + }, _ => None, }) } @@ -596,10 +628,12 @@ where G: FnOnce(E::Slot) -> E, { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot) => - Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))), - ViableEpochDescriptor::Signaled(identifier, _) => - self.epoch_mut(identifier).map(ViableEpoch::Signaled), + ViableEpochDescriptor::UnimportedGenesis(slot) => { + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))) + }, + ViableEpochDescriptor::Signaled(identifier, _) => { + self.epoch_mut(identifier).map(ViableEpoch::Signaled) + }, } } @@ -662,7 +696,7 @@ where ) -> Result>, fork_tree::Error> { if parent_number == Zero::zero() { // need to insert the genesis epoch. 
- return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot))) + return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot))); } if let Some(gap) = &self.gap { @@ -670,7 +704,7 @@ where return Ok(Some(ViableEpochDescriptor::Signaled( EpochIdentifier { position, hash, number }, hdr, - ))) + ))); } } @@ -714,8 +748,9 @@ where (EpochIdentifierPosition::Genesis0, epoch_0.clone()) } }, - PersistedEpochHeader::Regular(ref epoch_n) => - (EpochIdentifierPosition::Regular, epoch_n.clone()), + PersistedEpochHeader::Regular(ref epoch_n) => { + (EpochIdentifierPosition::Regular, epoch_n.clone()) + }, }, node, ) @@ -755,15 +790,15 @@ where Err(e) => PersistedEpoch::Regular(e), } } - } else if epoch.is_genesis() && - !self.epochs.is_empty() && - !self.epochs.values().any(|e| e.is_genesis()) + } else if epoch.is_genesis() + && !self.epochs.is_empty() + && !self.epochs.values().any(|e| e.is_genesis()) { // There's a genesis epoch imported when we already have an active epoch. // This happens after the warp sync as the ancient blocks download start. // We need to start tracking gap epochs here. self.gap = Some(GapEpochs { current: (hash, number, epoch), next: None }); - return Ok(()) + return Ok(()); } let res = self.inner.import(hash, number, header, &is_descendent_of); @@ -809,8 +844,8 @@ where let is_descendent_of = descendent_of_builder.build_is_descendent_of(None); let filter = |node_hash: &Hash, node_num: &Number, _: &PersistedEpochHeader| { - if number >= *node_num && - (is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash) + if number >= *node_num + && (is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash) { // Continue the search in this subtree. 
FilterAction::KeepNode @@ -872,7 +907,7 @@ mod tests { if let Some((ref c_head, ref c_parent)) = current { if head == c_head { if base == c_parent { - return Ok(true) + return Ok(true); } else { head = c_parent; } diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 206f5163a13cd..0cf33beddbbe4 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -149,7 +149,7 @@ where authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, ) -> Result { if authorities.is_empty() { - return Err(Error::StringError("Cannot supply empty authority set!".into())) + return Err(Error::StringError("Cannot supply empty authority set!".into())); } let config = sc_consensus_babe::configuration(&*client)?; @@ -299,14 +299,15 @@ where // manually hard code epoch descriptor epoch_descriptor = match epoch_descriptor { - ViableEpochDescriptor::Signaled(identifier, _header) => + ViableEpochDescriptor::Signaled(identifier, _header) => { ViableEpochDescriptor::Signaled( identifier, EpochHeader { start_slot: slot, end_slot: (*slot * self.config.epoch_length).into(), }, - ), + ) + }, _ => unreachable!( "we're not in the authorities, so this isn't the genesis epoch; qed" ), diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 32e3acf68506e..c5dc5fd09a28f 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -95,7 +95,7 @@ pub async fn seal_block( { let future = async { if pool.status().ready == 0 && !create_empty { - return Err(Error::EmptyTransactionPool) + return Err(Error::EmptyTransactionPool); } // get the header to build this new block on. 
@@ -135,7 +135,7 @@ pub async fn seal_block( .await?; if proposal.block.extrinsics().len() == inherents_len && !create_empty { - return Err(Error::EmptyTransactionPool) + return Err(Error::EmptyTransactionPool); } let (header, body) = proposal.block.deconstruct(); @@ -158,8 +158,9 @@ pub async fn seal_block( post_header.digest_mut().logs.extend(params.post_digests.iter().cloned()); match block_import.import_block(params, HashMap::new()).await? { - ImportResult::Imported(aux) => - Ok(CreatedBlock { hash: ::Header::hash(&post_header), aux }), + ImportResult::Imported(aux) => { + Ok(CreatedBlock { hash: ::Header::hash(&post_header), aux }) + }, other => Err(other.into()), } }; diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index dcf069d617bab..cffc0fe3959af 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -270,7 +270,7 @@ where execution_context: ExecutionContext, ) -> Result<(), Error> { if *block.header().number() < self.check_inherents_after { - return Ok(()) + return Ok(()); } let inherent_data = inherent_data_providers @@ -372,7 +372,7 @@ where &inner_seal, difficulty, )? 
{ - return Err(Error::::InvalidSeal.into()) + return Err(Error::::InvalidSeal.into()); } aux.difficulty = difficulty; @@ -417,19 +417,20 @@ impl PowVerifier { let hash = header.hash(); let (seal, inner_seal) = match header.digest_mut().pop() { - Some(DigestItem::Seal(id, seal)) => + Some(DigestItem::Seal(id, seal)) => { if id == POW_ENGINE_ID { (DigestItem::Seal(id, seal.clone()), seal) } else { - return Err(Error::WrongEngine(id)) - }, + return Err(Error::WrongEngine(id)); + } + }, _ => return Err(Error::HeaderUnsealed(hash)), }; let pre_hash = header.hash(); if !self.algorithm.preliminary_verify(&pre_hash, &inner_seal)?.unwrap_or(true) { - return Err(Error::FailedPreliminaryVerify) + return Err(Error::FailedPreliminaryVerify); } Ok((header, seal)) @@ -526,13 +527,13 @@ where let task = async move { loop { if timer.next().await.is_none() { - break + break; } if sync_oracle.is_major_syncing() { debug!(target: "pow", "Skipping proposal due to sync."); worker.on_major_syncing(); - continue + continue; } let best_header = match select_chain.best_chain().await { @@ -544,13 +545,13 @@ where Select best chain error: {}", err ); - continue + continue; }, }; let best_hash = best_header.hash(); if worker.best_hash() == Some(best_hash) { - continue + continue; } // The worker is locked for the duration of the whole proposing period. 
Within this @@ -565,7 +566,7 @@ where Fetch difficulty failed: {}", err, ); - continue + continue; }, }; @@ -581,7 +582,7 @@ where Creating inherent data providers failed: {}", err, ); - continue + continue; }, }; @@ -594,7 +595,7 @@ where Creating inherent data failed: {}", e, ); - continue + continue; }, }; @@ -614,7 +615,7 @@ where Creating proposer failed: {:?}", err, ); - continue + continue; }, }; @@ -628,7 +629,7 @@ where Creating proposal failed: {}", err, ); - continue + continue; }, }; @@ -655,8 +656,9 @@ fn find_pre_digest(header: &B::Header) -> Result>, Err for log in header.digest().logs() { trace!(target: "pow", "Checking log {:?}, looking for pre runtime digest", log); match (log, pre_digest.is_some()) { - (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => - return Err(Error::MultiplePreRuntimeDigests), + (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => { + return Err(Error::MultiplePreRuntimeDigests) + }, (DigestItem::PreRuntime(POW_ENGINE_ID, v), false) => { pre_digest = Some(v.clone()); }, @@ -670,12 +672,13 @@ fn find_pre_digest(header: &B::Header) -> Result>, Err /// Fetch PoW seal. 
fn fetch_seal(digest: Option<&DigestItem>, hash: B::Hash) -> Result, Error> { match digest { - Some(DigestItem::Seal(id, seal)) => + Some(DigestItem::Seal(id, seal)) => { if id == &POW_ENGINE_ID { Ok(seal.clone()) } else { Err(Error::::WrongEngine(*id)) - }, + } + }, _ => Err(Error::::HeaderUnsealed(hash)), } } diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index a00da6e7022fb..3e7fab50a610e 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -163,7 +163,7 @@ where target: "pow", "Unable to import mined block: seal is invalid", ); - return false + return false; }, Err(err) => { warn!( @@ -171,7 +171,7 @@ where "Unable to import mined block: {}", err, ); - return false + return false; }, } } else { @@ -179,7 +179,7 @@ where target: "pow", "Unable to import mined block: metadata does not exist", ); - return false + return false; } let build = if let Some(build) = { @@ -196,7 +196,7 @@ where target: "pow", "Unable to import mined block: build does not exist", ); - return false + return false; }; let seal = DigestItem::Seal(POW_ENGINE_ID, seal); diff --git a/client/consensus/slots/src/aux_schema.rs b/client/consensus/slots/src/aux_schema.rs index c1d01500ffe47..378ad24c95c4f 100644 --- a/client/consensus/slots/src/aux_schema.rs +++ b/client/consensus/slots/src/aux_schema.rs @@ -64,7 +64,7 @@ where { // We don't check equivocations for old headers out of our capacity. if slot_now.saturating_sub(*slot) > MAX_SLOT_CAPACITY { - return Ok(None) + return Ok(None); } // Key for this slot. @@ -81,7 +81,7 @@ where if slot_now < first_saved_slot { // The code below assumes that slots will be visited sequentially. - return Ok(None) + return Ok(None); } for (prev_header, prev_signer) in headers_with_sig.iter() { @@ -101,7 +101,7 @@ where // since it's already saved and a possible equivocation // would have been detected before. 
Ok(None) - } + }; } } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 90bfef6c1609c..a34cc85902451 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -215,7 +215,7 @@ pub trait SimpleSlotWorker { Either::Left((Err(err), _)) => { warn!(target: logging_target, "Proposing failed: {}", err); - return None + return None; }, Either::Right(_) => { info!( @@ -235,7 +235,7 @@ pub trait SimpleSlotWorker { "slot" => *slot, ); - return None + return None; }, }; @@ -262,7 +262,7 @@ pub trait SimpleSlotWorker { "Skipping proposal slot {} since there's no time left to propose", slot, ); - return None + return None; } else { Delay::new(proposing_remaining_duration) }; @@ -285,7 +285,7 @@ pub trait SimpleSlotWorker { "err" => ?err, ); - return None + return None; }, }; @@ -293,9 +293,9 @@ pub trait SimpleSlotWorker { let authorities_len = self.authorities_len(&aux_data); - if !self.force_authoring() && - self.sync_oracle().is_offline() && - authorities_len.map(|a| a > 1).unwrap_or(false) + if !self.force_authoring() + && self.sync_oracle().is_offline() + && authorities_len.map(|a| a > 1).unwrap_or(false) { debug!(target: logging_target, "Skipping proposal slot. 
Waiting for the network."); telemetry!( @@ -305,13 +305,13 @@ pub trait SimpleSlotWorker { "authorities_len" => authorities_len, ); - return None + return None; } let claim = self.claim_slot(&slot_info.chain_head, slot, &aux_data).await?; if self.should_backoff(slot, &slot_info.chain_head) { - return None + return None; } debug!(target: logging_target, "Starting authorship at slot: {slot}"); @@ -331,7 +331,7 @@ pub trait SimpleSlotWorker { "err" => ?err ); - return None + return None; }, }; @@ -358,7 +358,7 @@ pub trait SimpleSlotWorker { Err(err) => { warn!(target: logging_target, "Failed to create block import params: {}", err); - return None + return None; }, }; @@ -484,13 +484,13 @@ pub async fn start_slot_worker( Ok(r) => r, Err(e) => { warn!(target: "slots", "Error while polling for next slot: {}", e); - return + return; }, }; if sync_oracle.is_major_syncing() { debug!(target: "slots", "Skipping proposal slot due to sync."); - continue + continue; } let _ = worker.on_slot(slot_info).await; @@ -570,7 +570,7 @@ pub fn proposing_remaining_duration( // If parent is genesis block, we don't require any lenience factor. if slot_info.chain_head.number().is_zero() { - return proposing_duration + return proposing_duration; } let parent_slot = match parent_slot { @@ -733,7 +733,7 @@ where ) -> bool { // This should not happen, but we want to keep the previous behaviour if it does. if slot_now <= chain_head_slot { - return false + return false; } // There can be race between getting the finalized number and getting the best number. diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index f3dc485a8e819..2bbdb0b7e0ee0 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -155,7 +155,7 @@ where ); // Let's try at the next slot.. 
self.inner_delay.take(); - continue + continue; }, }; @@ -178,7 +178,7 @@ where if slot > self.last_slot { self.last_slot = slot; - break Ok(SlotInfo::new(slot, inherent_data, self.slot_duration, chain_head, None)) + break Ok(SlotInfo::new(slot, inherent_data, self.slot_duration, chain_head, None)); } } } diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 13d91fff0b555..db6d6cb7146ca 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -177,7 +177,7 @@ impl BenchmarkingState { // Childtrie is identified by its storage key (i.e. `ChildInfo::storage_key`) fn add_read_key(&self, childtrie: Option<&[u8]>, key: &[u8]) { if !self.enable_tracking { - return + return; } let mut child_key_tracker = self.child_key_tracker.borrow_mut(); @@ -218,7 +218,7 @@ impl BenchmarkingState { // Childtrie is identified by its storage key (i.e. `ChildInfo::storage_key`) fn add_write_key(&self, childtrie: Option<&[u8]>, key: &[u8]) { if !self.enable_tracking { - return + return; } let mut child_key_tracker = self.child_key_tracker.borrow_mut(); @@ -489,7 +489,7 @@ impl StateBackend> for BenchmarkingState { }) }); } else { - return Err("Trying to commit to a closed db".into()) + return Err("Trying to commit to a closed db".into()); } self.reopen() } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 305db2284b2ed..c4ed98e54d553 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -529,15 +529,16 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha BlockId::Hash(h) => { let mut cache = self.header_cache.lock(); if let Some(result) = cache.get_refresh(h) { - return Ok(result.clone()) + return Ok(result.clone()); } let header = utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?; cache_header(&mut cache, *h, header.clone()); Ok(header) }, - BlockId::Number(_) => - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id), + BlockId::Number(_) => { + utils::read_header(&*self.db, 
columns::KEY_LOOKUP, columns::HEADER, id) + }, } } @@ -584,11 +585,12 @@ impl sc_client_api::blockchain::Backend for BlockchainDb return Ok(Some(body)), - Err(err) => + Err(err) => { return Err(sp_blockchain::Error::Backend(format!( "Error decoding body: {}", err - ))), + ))) + }, } } @@ -618,11 +620,12 @@ impl sc_client_api::blockchain::Backend for BlockchainDb + None => { return Err(sp_blockchain::Error::Backend(format!( "Missing indexed transaction {:?}", hash - ))), + ))) + }, }; }, DbExtrinsic::Full(ex) => { @@ -630,13 +633,14 @@ impl sc_client_api::blockchain::Backend for BlockchainDb + Err(err) => { return Err(sp_blockchain::Error::Backend(format!( "Error decoding body list: {}", err - ))), + ))) + }, } } Ok(None) @@ -651,11 +655,12 @@ impl sc_client_api::blockchain::Backend for BlockchainDb match Decode::decode(&mut &justifications[..]) { Ok(justifications) => Ok(Some(justifications)), - Err(err) => + Err(err) => { return Err(sp_blockchain::Error::Backend(format!( "Error decoding justifications: {}", err - ))), + ))) + }, }, None => Ok(None), } @@ -711,18 +716,20 @@ impl sc_client_api::blockchain::Backend for BlockchainDb transactions.push(t), - None => + None => { return Err(sp_blockchain::Error::Backend(format!( "Missing indexed transaction {:?}", hash - ))), + ))) + }, } } } Ok(Some(transactions)) }, - Err(err) => - Err(sp_blockchain::Error::Backend(format!("Error decoding body list: {}", err))), + Err(err) => { + Err(sp_blockchain::Error::Backend(format!("Error decoding body list: {}", err))) + }, } } } @@ -786,8 +793,9 @@ impl BlockImportOperation { count += 1; let key = crate::offchain::concatenate_prefix_and_key(&prefix, &key); match value_operation { - OffchainOverlayedChange::SetValue(val) => - transaction.set_from_vec(columns::OFFCHAIN, &key, val), + OffchainOverlayedChange::SetValue(val) => { + transaction.set_from_vec(columns::OFFCHAIN, &key, val) + }, OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key), } } @@ -812,7 
+820,7 @@ impl BlockImportOperation { state_version: StateVersion, ) -> ClientResult { if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(k)) { - return Err(sp_blockchain::Error::InvalidState) + return Err(sp_blockchain::Error::InvalidState); } let child_delta = storage.children_default.values().map(|child_content| { @@ -1172,9 +1180,9 @@ impl Backend { // Older DB versions have no last state key. Check if the state is available and set it. let info = backend.blockchain.info(); - if info.finalized_state.is_none() && - info.finalized_hash != Default::default() && - sc_client_api::Backend::have_state_at( + if info.finalized_state.is_none() + && info.finalized_hash != Default::default() + && sc_client_api::Backend::have_state_at( &backend, info.finalized_hash, info.finalized_number, @@ -1213,11 +1221,11 @@ impl Backend { let meta = self.blockchain.meta.read(); - if meta.best_number > best_number && - (meta.best_number - best_number).saturated_into::() > - self.canonicalization_delay + if meta.best_number > best_number + && (meta.best_number - best_number).saturated_into::() + > self.canonicalization_delay { - return Err(sp_blockchain::Error::SetHeadTooOld) + return Err(sp_blockchain::Error::SetHeadTooOld); } let parent_exists = @@ -1236,7 +1244,7 @@ impl Backend { (&r.number, &r.hash) ); - return Err(sp_blockchain::Error::NotInFinalizedChain) + return Err(sp_blockchain::Error::NotInFinalizedChain); } retracted.push(r.hash); @@ -1274,14 +1282,14 @@ impl Backend { ) -> ClientResult<()> { let last_finalized = last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash); - if last_finalized != self.blockchain.meta.read().genesis_hash && - *header.parent_hash() != last_finalized + if last_finalized != self.blockchain.meta.read().genesis_hash + && *header.parent_hash() != last_finalized { return Err(sp_blockchain::Error::NonSequentialFinalization(format!( "Last finalized {:?} not parent of {:?}", last_finalized, header.hash() - ))) + ))); } 
Ok(()) } @@ -1324,7 +1332,7 @@ impl Backend { let new_canonical = number_u64 - self.canonicalization_delay; if new_canonical <= self.storage.state_db.best_canonical().unwrap_or(0) { - return Ok(()) + return Ok(()); } let hash = if new_canonical == number_u64 { hash @@ -1341,7 +1349,7 @@ impl Backend { })? }; if !sc_client_api::Backend::have_state_at(self, hash, new_canonical.saturated_into()) { - return Ok(()) + return Ok(()); } trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); @@ -1524,8 +1532,8 @@ impl Backend { let finalized = number_u64 == 0 || pending_block.leaf_state.is_final(); finalized } else { - (number.is_zero() && last_finalized_num.is_zero()) || - pending_block.leaf_state.is_final() + (number.is_zero() && last_finalized_num.is_zero()) + || pending_block.leaf_state.is_final() }; let header = &pending_block.header; @@ -1614,11 +1622,9 @@ impl Backend { &(start, end).encode(), ); } - } else if number > best_num + One::one() && - number > One::one() && self - .blockchain - .header(BlockId::hash(parent_hash))? 
- .is_none() + } else if number > best_num + One::one() + && number > One::one() + && self.blockchain.header(BlockId::hash(parent_hash))?.is_none() { let gap = (best_num + One::one(), number - One::one()); transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode()); @@ -1660,7 +1666,7 @@ impl Backend { return Err(sp_blockchain::Error::UnknownBlock(format!( "Cannot set head {:?}", set_head - ))) + ))); } } @@ -1703,8 +1709,9 @@ impl Backend { } transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - if sc_client_api::Backend::have_state_at(self, f_hash, f_num) && - self.storage + if sc_client_api::Backend::have_state_at(self, f_hash, f_num) + && self + .storage .state_db .best_canonical() .map(|c| f_num.saturated_into::() > c) @@ -1812,17 +1819,19 @@ impl Backend { id, )?; match Vec::>::decode(&mut &index[..]) { - Ok(index) => + Ok(index) => { for ex in index { if let DbExtrinsic::Indexed { hash, .. } = ex { transaction.release(columns::TRANSACTION, hash); } - }, - Err(err) => + } + }, + Err(err) => { return Err(sp_blockchain::Error::Backend(format!( "Error decoding body list: {}", err - ))), + ))) + }, } } Ok(()) @@ -2042,17 +2051,17 @@ impl sc_client_api::backend::Backend for Backend { let last_finalized = self.blockchain.last_finalized()?; // We can do a quick check first, before doing a proper but more expensive check - if number > self.blockchain.info().finalized_number || - (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?) + if number > self.blockchain.info().finalized_number + || (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?) { - return Err(ClientError::NotInFinalizedChain) + return Err(ClientError::NotInFinalizedChain); } let justifications = if let Some(mut stored_justifications) = self.blockchain.justifications(hash)? 
{ if !stored_justifications.append(justification) { - return Err(ClientError::BadJustification("Duplicate consensus engine ID".into())) + return Err(ClientError::BadJustification("Duplicate consensus engine ID".into())); } stored_justifications } else { @@ -2138,7 +2147,7 @@ impl sc_client_api::backend::Backend for Backend { let mut revert_blocks = || -> ClientResult> { for c in 0..n.saturated_into::() { if number_to_revert.is_zero() { - return Ok(c.saturated_into::>()) + return Ok(c.saturated_into::>()); } let mut transaction = Transaction::new(); let removed = @@ -2155,7 +2164,7 @@ impl sc_client_api::backend::Backend for Backend { if prev_number == best_number { best_hash } else { *removed.parent_hash() }; if !self.have_state_at(prev_hash, prev_number) { - return Ok(c.saturated_into::>()) + return Ok(c.saturated_into::>()); } match self.storage.state_db.revert_one() { @@ -2181,11 +2190,10 @@ impl sc_client_api::backend::Backend for Backend { reverted_finalized.insert(removed_hash); if let Some((hash, _)) = self.blockchain.info().finalized_state { if hash == hash_to_revert { - if !number_to_revert.is_zero() && - self.have_state_at( - prev_hash, - number_to_revert - One::one(), - ) { + if !number_to_revert.is_zero() + && self + .have_state_at(prev_hash, number_to_revert - One::one()) + { let lookup_key = utils::number_and_hash_to_lookup_key( number_to_revert - One::one(), prev_hash, @@ -2251,7 +2259,10 @@ impl sc_client_api::backend::Backend for Backend { let best_hash = self.blockchain.info().best_hash; if best_hash == hash { - return Err(sp_blockchain::Error::Backend(format!("Can't remove best block {:?}", hash))) + return Err(sp_blockchain::Error::Backend(format!( + "Can't remove best block {:?}", + hash + ))); } let hdr = self.blockchain.header_metadata(hash)?; @@ -2259,7 +2270,7 @@ impl sc_client_api::backend::Backend for Backend { return Err(sp_blockchain::Error::UnknownBlock(format!( "State already discarded for {:?}", hash - ))) + ))); } let mut leaves = 
self.blockchain.leaves.write(); @@ -2267,7 +2278,7 @@ impl sc_client_api::backend::Backend for Backend { return Err(sp_blockchain::Error::Backend(format!( "Can't remove non-leaf block {:?}", hash - ))) + ))); } let mut transaction = Transaction::new(); @@ -2307,7 +2318,7 @@ impl sc_client_api::backend::Backend for Backend { if let Some(outcome) = remove_outcome { leaves.undo().undo_remove(outcome); } - return Err(e.into()) + return Err(e.into()); } self.blockchain().remove_header_metadata(hash); Ok(()) @@ -2326,7 +2337,7 @@ impl sc_client_api::backend::Backend for Backend { .build(); let state = RefTrackingState::new(db_state, self.storage.clone(), None); - return Ok(RecordStatsState::new(state, None, self.state_usage.clone())) + return Ok(RecordStatsState::new(state, None, self.state_usage.clone())); } } diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs index 4adacbf6f041c..4fdf8ef6896dd 100644 --- a/client/db/src/parity_db.rs +++ b/client/db/src/parity_db.rs @@ -97,15 +97,16 @@ impl> Database for DbAdapter { Some(match change { Change::Set(col, key, value) => (col as u8, key, Some(value)), Change::Remove(col, key) => (col as u8, key, None), - Change::Store(col, key, value) => + Change::Store(col, key, value) => { if ref_counted_column(col) { (col as u8, key.as_ref().to_vec(), Some(value)) } else { if !not_ref_counted_column.contains(&col) { not_ref_counted_column.push(col); } - return None - }, + return None; + } + }, Change::Reference(col, key) => { if ref_counted_column(col) { // FIXME accessing value is not strictly needed, optimize this in parity-db. 
@@ -115,18 +116,19 @@ impl> Database for DbAdapter { if !not_ref_counted_column.contains(&col) { not_ref_counted_column.push(col); } - return None + return None; } }, - Change::Release(col, key) => + Change::Release(col, key) => { if ref_counted_column(col) { (col as u8, key.as_ref().to_vec(), None) } else { if !not_ref_counted_column.contains(&col) { not_ref_counted_column.push(col); } - return None - }, + return None; + } + }, }) })); @@ -134,7 +136,7 @@ impl> Database for DbAdapter { return Err(DatabaseError(Box::new(parity_db::Error::InvalidInput(format!( "Ref counted operation on non ref counted columns {:?}", not_ref_counted_column - ))))) + ))))); } result.map_err(|e| DatabaseError(Box::new(e))) diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 51750bf689759..8350e06bd9ed1 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -163,8 +163,9 @@ fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> Upgr /// If the file does not exist returns 0. fn current_version(path: &Path) -> UpgradeResult { match fs::File::open(version_file_path(path)) { - Err(ref err) if err.kind() == ErrorKind::NotFound => - Err(UpgradeError::MissingDatabaseVersionFile), + Err(ref err) if err.kind() == ErrorKind::NotFound => { + Err(UpgradeError::MissingDatabaseVersionFile) + }, Err(_) => Err(UpgradeError::UnknownDatabaseVersion), Ok(mut file) => { let mut s = String::new(); diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 567950d089e1b..e9f88a8dab663 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -193,11 +193,12 @@ fn open_database_at( let db: Arc> = match &db_source { DatabaseSource::ParityDb { path } => open_parity_db::(path, db_type, create)?, #[cfg(feature = "rocksdb")] - DatabaseSource::RocksDb { path, cache_size } => - open_kvdb_rocksdb::(path, db_type, create, *cache_size)?, + DatabaseSource::RocksDb { path, cache_size } => { + open_kvdb_rocksdb::(path, db_type, create, *cache_size)? 
+ }, DatabaseSource::Custom { db, require_create_flag } => { if *require_create_flag && !create { - return Err(OpenDbError::DoesNotExist) + return Err(OpenDbError::DoesNotExist); } db.clone() }, @@ -205,8 +206,9 @@ fn open_database_at( // check if rocksdb exists first, if not, open paritydb match open_kvdb_rocksdb::(rocksdb_path, db_type, false, *cache_size) { Ok(db) => db, - Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) => - open_parity_db::(paritydb_path, db_type, create)?, + Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) => { + open_parity_db::(paritydb_path, db_type, create)? + }, Err(as_is) => return Err(as_is), } }, @@ -359,13 +361,14 @@ pub fn check_database_type( db_type: DatabaseType, ) -> Result<(), OpenDbError> { match db.get(COLUMN_META, meta_keys::TYPE) { - Some(stored_type) => + Some(stored_type) => { if db_type.as_str().as_bytes() != &*stored_type { return Err(OpenDbError::UnexpectedDbType { expected: db_type, found: stored_type.to_owned(), - }) - }, + }); + } + }, None => { let mut transaction = Transaction::new(); transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes()); @@ -387,8 +390,8 @@ fn maybe_migrate_to_type_subdir( // Do we have to migrate to a database-type-based subdirectory layout: // See if there's a file identifying a rocksdb or paritydb folder in the parent dir and // the target path ends in a role specific directory - if (basedir.join("db_version").exists() || basedir.join("metadata").exists()) && - (p.ends_with(DatabaseType::Full.as_str())) + if (basedir.join("db_version").exists() || basedir.join("metadata").exists()) + && (p.ends_with(DatabaseType::Full.as_str())) { // Try to open the database to check if the current `DatabaseType` matches the type of // database stored in the target directory and close the database on success. @@ -475,7 +478,7 @@ where { let genesis_hash: Block::Hash = match read_genesis_hash(db)? 
{ Some(genesis_hash) => genesis_hash, - None => + None => { return Ok(Meta { best_hash: Default::default(), best_number: Zero::zero(), @@ -484,7 +487,8 @@ where genesis_hash: Default::default(), finalized_state: None, block_gap: None, - }), + }) + }, }; let load_meta_block = |desc, key| -> Result<_, sp_blockchain::Error> { @@ -538,8 +542,9 @@ pub fn read_genesis_hash( match db.get(COLUMN_META, meta_keys::GENESIS_HASH) { Some(h) => match Decode::decode(&mut &h[..]) { Ok(h) => Ok(Some(h)), - Err(err) => - Err(sp_blockchain::Error::Backend(format!("Error decoding genesis hash: {}", err))), + Err(err) => { + Err(sp_blockchain::Error::Backend(format!("Error decoding genesis hash: {}", err))) + }, }, None => Ok(None), } diff --git a/client/executor/benches/bench.rs b/client/executor/benches/bench.rs index fcefe408603d7..e6c5fc185503c 100644 --- a/client/executor/benches/bench.rs +++ b/client/executor/benches/bench.rs @@ -250,7 +250,7 @@ fn bench_call_instance(c: &mut Criterion) { for thread_count in thread_counts { if thread_count > num_cpus { // If there are not enough cores available the benchmark is pointless. - continue + continue; } let benchmark_name = format!( diff --git a/client/executor/common/src/runtime_blob/data_segments_snapshot.rs b/client/executor/common/src/runtime_blob/data_segments_snapshot.rs index e65fc32f637a6..ba533d09db198 100644 --- a/client/executor/common/src/runtime_blob/data_segments_snapshot.rs +++ b/client/executor/common/src/runtime_blob/data_segments_snapshot.rs @@ -49,7 +49,7 @@ impl DataSegmentsSnapshot { // [op, End] if init_expr.len() != 2 { - return Err(Error::InitializerHasTooManyExpressions) + return Err(Error::InitializerHasTooManyExpressions); } let offset = match &init_expr[0] { Instruction::I32Const(v) => *v as u32, @@ -60,7 +60,7 @@ impl DataSegmentsSnapshot { // At the moment of writing the Substrate Runtime Interface does not provide // any globals. 
There is nothing that prevents us from supporting this // if/when we gain those. - return Err(Error::ImportedGlobalsUnsupported) + return Err(Error::ImportedGlobalsUnsupported); }, insn => return Err(Error::InvalidInitializerExpression(format!("{:?}", insn))), }; diff --git a/client/executor/common/src/runtime_blob/runtime_blob.rs b/client/executor/common/src/runtime_blob/runtime_blob.rs index 08df4b32d59eb..05efced2ce0e5 100644 --- a/client/executor/common/src/runtime_blob/runtime_blob.rs +++ b/client/executor/common/src/runtime_blob/runtime_blob.rs @@ -151,7 +151,7 @@ impl RuntimeBlob { .entries_mut() .push(ExportEntry::new(memory_name, Internal::Memory(0))); - break + break; } Ok(()) @@ -176,7 +176,7 @@ impl RuntimeBlob { .ok_or_else(|| WasmError::Other("no memory section found".into()))?; if memory_section.entries().is_empty() { - return Err(WasmError::Other("memory section is empty".into())) + return Err(WasmError::Other("memory section is empty".into())); } for memory_ty in memory_section.entries_mut() { let min = memory_ty.limits().initial().saturating_add(extra_heap_pages); @@ -190,8 +190,9 @@ impl RuntimeBlob { pub(super) fn exported_internal_global_names(&self) -> impl Iterator { let exports = self.raw_module.export_section().map(|es| es.entries()).unwrap_or(&[]); exports.iter().filter_map(|export| match export.internal() { - Internal::Global(_) if export.field().starts_with("exported_internal_global") => - Some(export.field()), + Internal::Global(_) if export.field().starts_with("exported_internal_global") => { + Some(export.field()) + }, _ => None, }) } diff --git a/client/executor/common/src/sandbox.rs b/client/executor/common/src/sandbox.rs index 1e925bd5a7835..90a6ba1e1597a 100644 --- a/client/executor/common/src/sandbox.rs +++ b/client/executor/common/src/sandbox.rs @@ -200,12 +200,14 @@ impl SandboxInstance { sandbox_context: &mut dyn SandboxContext, ) -> std::result::Result, error::Error> { match &self.backend_instance { - 
BackendInstance::Wasmi(wasmi_instance) => - wasmi_invoke(self, wasmi_instance, export_name, args, state, sandbox_context), + BackendInstance::Wasmi(wasmi_instance) => { + wasmi_invoke(self, wasmi_instance, export_name, args, state, sandbox_context) + }, #[cfg(feature = "wasmer-sandbox")] - BackendInstance::Wasmer(wasmer_instance) => - wasmer_invoke(wasmer_instance, export_name, args, state, sandbox_context), + BackendInstance::Wasmer(wasmer_instance) => { + wasmer_invoke(wasmer_instance, export_name, args, state, sandbox_context) + }, } } @@ -406,8 +408,9 @@ impl BackendContext { SandboxBackend::TryWasmer => BackendContext::Wasmi, #[cfg(feature = "wasmer-sandbox")] - SandboxBackend::Wasmer | SandboxBackend::TryWasmer => - BackendContext::Wasmer(WasmerBackend::new()), + SandboxBackend::Wasmer | SandboxBackend::TryWasmer => { + BackendContext::Wasmer(WasmerBackend::new()) + }, } } } @@ -563,8 +566,9 @@ impl Store

{ BackendContext::Wasmi => wasmi_instantiate(wasm, guest_env, state, sandbox_context)?, #[cfg(feature = "wasmer-sandbox")] - BackendContext::Wasmer(ref context) => - wasmer_instantiate(context, wasm, guest_env, state, sandbox_context)?, + BackendContext::Wasmer(ref context) => { + wasmer_instantiate(context, wasm, guest_env, state, sandbox_context)? + }, }; Ok(UnregisteredInstance { sandbox_instance }) diff --git a/client/executor/common/src/sandbox/wasmer_backend.rs b/client/executor/common/src/sandbox/wasmer_backend.rs index 29926141ed8b8..0890feab65403 100644 --- a/client/executor/common/src/sandbox/wasmer_backend.rs +++ b/client/executor/common/src/sandbox/wasmer_backend.rs @@ -85,11 +85,12 @@ pub fn invoke( wasmer::Val::I64(val) => Value::I64(val), wasmer::Val::F32(val) => Value::F32(f32::to_bits(val)), wasmer::Val::F64(val) => Value::F64(f64::to_bits(val)), - _ => + _ => { return Err(Error::Sandbox(format!( "Unsupported return value: {:?}", wasm_value, - ))), + ))) + }, }; Ok(Some(wasmer_value)) @@ -160,7 +161,7 @@ pub fn instantiate( index } else { // Missing import (should we abort here?) 
- continue + continue; }; let supervisor_func_index = guest_env @@ -189,8 +190,9 @@ pub fn instantiate( wasmer::Instance::new(&module, &import_object).map_err(|error| match error { wasmer::InstantiationError::Link(_) => InstantiationError::Instantiation, wasmer::InstantiationError::Start(_) => InstantiationError::StartTrapped, - wasmer::InstantiationError::HostEnvInitialization(_) => - InstantiationError::EnvironmentDefinitionCorrupted, + wasmer::InstantiationError::HostEnvInitialization(_) => { + InstantiationError::EnvironmentDefinitionCorrupted + }, wasmer::InstantiationError::CpuFeature(_) => InstantiationError::CpuFeature, }) })?; @@ -217,8 +219,9 @@ fn dispatch_function( wasmer::Val::I64(val) => Ok(Value::I64(*val)), wasmer::Val::F32(val) => Ok(Value::F32(f32::to_bits(*val))), wasmer::Val::F64(val) => Ok(Value::F64(f64::to_bits(*val))), - _ => - Err(RuntimeError::new(format!("Unsupported function argument: {:?}", val))), + _ => { + Err(RuntimeError::new(format!("Unsupported function argument: {:?}", val))) + }, }) .collect::, _>>()? 
.encode(); @@ -246,7 +249,7 @@ fn dispatch_function( "Failed dealloction after failed write of invoke arguments", )?; - return Err(RuntimeError::new("Can't write invoke args into memory")) + return Err(RuntimeError::new("Can't write invoke args into memory")); } // Perform the actuall call diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 0424ad418617b..c198400656017 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -451,7 +451,7 @@ where args: &[Value], ) -> Result { if args.len() != 1 { - return Err(sp_sandbox::HostError) + return Err(sp_sandbox::HostError); } let condition = args[0].as_i32().ok_or_else(|| sp_sandbox::HostError)?; if condition != 0 { @@ -465,7 +465,7 @@ where args: &[Value], ) -> Result { if args.len() != 1 { - return Err(sp_sandbox::HostError) + return Err(sp_sandbox::HostError); } let inc_by = args[0].as_i32().ok_or_else(|| sp_sandbox::HostError)?; e.counter += inc_by as u32; diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 0eabffb8c87df..6013619d46954 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -299,7 +299,7 @@ where .map_err(|e| format!("Failed to read the static section: {:?}", e)) .map(|v| v.map(|v| v.encode()))? 
{ - return Ok(version) + return Ok(version); } // If the blob didn't have embedded runtime version section, we fallback to the legacy diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 991802340db61..0fb38424847f8 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -315,7 +315,7 @@ where .map(|runtime| -> Arc { Arc::new(runtime) }) }, #[cfg(feature = "wasmtime")] - WasmExecutionMethod::Compiled { instantiation_strategy } => + WasmExecutionMethod::Compiled { instantiation_strategy } => { sc_executor_wasmtime::create_runtime::( blob, sc_executor_wasmtime::Config { @@ -331,7 +331,8 @@ where }, }, ) - .map(|runtime| -> Arc { Arc::new(runtime) }), + .map(|runtime| -> Arc { Arc::new(runtime) }) + }, } } diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 1284cc23e4c96..41341db22b994 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -163,7 +163,7 @@ impl Sandbox for FunctionExecutor { }; if self.memory.set(buf_ptr.into(), &buffer).is_err() { - return Ok(sandbox_env::ERR_OUT_OF_BOUNDS) + return Ok(sandbox_env::ERR_OUT_OF_BOUNDS); } Ok(sandbox_env::ERR_OK) @@ -188,7 +188,7 @@ impl Sandbox for FunctionExecutor { }; if sandboxed_memory.write_from(Pointer::new(offset as u32), &buffer).is_err() { - return Ok(sandbox_env::ERR_OUT_OF_BOUNDS) + return Ok(sandbox_env::ERR_OUT_OF_BOUNDS); } Ok(sandbox_env::ERR_OK) @@ -245,7 +245,7 @@ impl Sandbox for FunctionExecutor { // Serialize return value and write it back into the memory. 
sp_wasm_interface::ReturnValue::Value(val).using_encoded(|val| { if val.len() > return_val_len as usize { - return Err("Return value buffer is too small".into()) + return Err("Return value buffer is too small".into()); } self.write_memory(return_val, val).map_err(|_| "Return value buffer is OOB")?; Ok(sandbox_env::ERR_OK) @@ -362,14 +362,14 @@ impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { for (function_index, function) in self.host_functions.iter().enumerate() { if name == function.name() { if signature == function.signature() { - return Ok(wasmi::FuncInstance::alloc_host(signature.into(), function_index)) + return Ok(wasmi::FuncInstance::alloc_host(signature.into(), function_index)); } else { return Err(wasmi::Error::Instantiation(format!( "Invalid signature for function `{}` expected `{:?}`, got `{:?}`", function.name(), signature, function.signature(), - ))) + ))); } } } @@ -392,8 +392,9 @@ impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { ) -> Result { if field_name == "memory" { match &mut *self.import_memory.borrow_mut() { - Some(_) => - Err(wasmi::Error::Instantiation("Memory can not be imported twice!".into())), + Some(_) => { + Err(wasmi::Error::Instantiation("Memory can not be imported twice!".into())) + }, memory_ref @ None => { if memory_type .maximum() @@ -442,9 +443,9 @@ impl wasmi::Externals for FunctionExecutor { .map_err(|msg| Error::FunctionExecution(function.name().to_string(), msg)) .map_err(wasmi::Trap::from) .map(|v| v.map(Into::into)) - } else if self.allow_missing_func_imports && - index >= self.host_functions.len() && - index < self.host_functions.len() + self.missing_functions.len() + } else if self.allow_missing_func_imports + && index >= self.host_functions.len() + && index < self.host_functions.len() + self.missing_functions.len() { Err(Error::from(format!( "Function `{}` is only a stub. 
Calling a stub is not allowed.", diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index 768a6e36e2390..b9089d5b59b83 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -171,7 +171,7 @@ impl<'a> Sandbox for HostContext<'a> { }; if util::write_memory_from(&mut self.caller, buf_ptr, &buffer).is_err() { - return Ok(sandbox_env::ERR_OUT_OF_BOUNDS) + return Ok(sandbox_env::ERR_OUT_OF_BOUNDS); } Ok(sandbox_env::ERR_OK) @@ -194,7 +194,7 @@ impl<'a> Sandbox for HostContext<'a> { }; if sandboxed_memory.write_from(Pointer::new(offset as u32), &buffer).is_err() { - return Ok(sandbox_env::ERR_OUT_OF_BOUNDS) + return Ok(sandbox_env::ERR_OUT_OF_BOUNDS); } Ok(sandbox_env::ERR_OK) @@ -243,7 +243,7 @@ impl<'a> Sandbox for HostContext<'a> { // Serialize return value and write it back into the memory. sp_wasm_interface::ReturnValue::Value(val.into()).using_encoded(|val| { if val.len() > return_val_len as usize { - return Err("Return value buffer is too small".into()) + return Err("Return value buffer is too small".into()); } ::write_memory(self, return_val, val) .map_err(|_| "can't write return value")?; @@ -360,12 +360,13 @@ impl<'a, 'b> sandbox::SandboxContext for SandboxContext<'a, 'b> { ); match result { - Ok(()) => + Ok(()) => { if let Some(ret_val) = ret_vals[0].i64() { Ok(ret_val) } else { Err("Supervisor function returned unexpected result!".into()) - }, + } + }, Err(err) => Err(err.to_string().into()), } } diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index c80952a2541ce..c247cd24033a1 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -41,19 +41,20 @@ where "host doesn't provide any imports from non-env module: {}:{}", import_ty.module(), name, - ))) + ))); } match import_ty.ty() { ExternType::Func(func_ty) => { pending_func_imports.insert(name.to_owned(), (import_ty, func_ty)); }, - _ => + _ 
=> { return Err(WasmError::Other(format!( "host doesn't provide any non function imports: {}:{}", import_ty.module(), name, - ))), + ))) + }, }; } @@ -80,7 +81,7 @@ where return Err(WasmError::Other(format!( "runtime requires function imports which are not present on the host: {}", names - ))) + ))); } } diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index feded4008068d..78f12844e6fe8 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -64,10 +64,12 @@ impl EntryPoint { let data_len = u32::from(data_len); match self.call_type { - EntryPointType::Direct { ref entrypoint } => - entrypoint.call(&mut *store, (data_ptr, data_len)), - EntryPointType::Wrapped { func, ref dispatcher } => - dispatcher.call(&mut *store, (func, data_ptr, data_len)), + EntryPointType::Direct { ref entrypoint } => { + entrypoint.call(&mut *store, (data_ptr, data_len)) + }, + EntryPointType::Wrapped { func, ref dispatcher } => { + dispatcher.call(&mut *store, (func, data_ptr, data_len)) + }, } .map_err(|trap| { let host_state = store @@ -337,7 +339,7 @@ impl InstanceWrapper { /// as a side-effect. pub fn decommit(&mut self) { if self.memory.data_size(&self.store) == 0 { - return + return; } cfg_if::cfg_if! { diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 5bca899648c34..174c0b0c333ef 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -244,10 +244,12 @@ impl WasmInstance for WasmtimeInstance { fn get_global_const(&mut self, name: &str) -> Result> { match &mut self.strategy { - Strategy::LegacyInstanceReuse { instance_wrapper, .. } => - instance_wrapper.get_global_val(name), - Strategy::RecreateInstance(ref mut instance_creator) => - instance_creator.instantiate()?.get_global_val(name), + Strategy::LegacyInstanceReuse { instance_wrapper, .. 
} => { + instance_wrapper.get_global_val(name) + }, + Strategy::RecreateInstance(ref mut instance_creator) => { + instance_creator.instantiate()?.get_global_val(name) + }, } } @@ -258,8 +260,9 @@ impl WasmInstance for WasmtimeInstance { // associated with it. None }, - Strategy::LegacyInstanceReuse { instance_wrapper, .. } => - Some(instance_wrapper.base_ptr()), + Strategy::LegacyInstanceReuse { instance_wrapper, .. } => { + Some(instance_wrapper.base_ptr()) + }, } } } @@ -663,11 +666,12 @@ where }), ) }, - InstantiationStrategy::Pooling | - InstantiationStrategy::PoolingCopyOnWrite | - InstantiationStrategy::RecreateInstance | - InstantiationStrategy::RecreateInstanceCopyOnWrite => - (module, InternalInstantiationStrategy::Builtin), + InstantiationStrategy::Pooling + | InstantiationStrategy::PoolingCopyOnWrite + | InstantiationStrategy::RecreateInstance + | InstantiationStrategy::RecreateInstanceCopyOnWrite => { + (module, InternalInstantiationStrategy::Builtin) + }, } }, CodeSupplyMode::Precompiled(compiled_artifact_path) => { diff --git a/client/executor/wasmtime/src/util.rs b/client/executor/wasmtime/src/util.rs index 83745e21e86af..07f9c7213ed15 100644 --- a/client/executor/wasmtime/src/util.rs +++ b/client/executor/wasmtime/src/util.rs @@ -158,8 +158,8 @@ pub(crate) fn replace_strategy_if_broken(strategy: &mut InstantiationStrategy) { // These strategies require a working `madvise` to be sound. 
InstantiationStrategy::PoolingCopyOnWrite => InstantiationStrategy::Pooling, - InstantiationStrategy::RecreateInstanceCopyOnWrite | - InstantiationStrategy::LegacyInstanceReuse => InstantiationStrategy::RecreateInstance, + InstantiationStrategy::RecreateInstanceCopyOnWrite + | InstantiationStrategy::LegacyInstanceReuse => InstantiationStrategy::RecreateInstance, }; use once_cell::sync::OnceCell; diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index 0803e6b3c2931..b3609a747104e 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -183,7 +183,7 @@ where /// Get a genesis set with given authorities. pub(crate) fn genesis(initial: AuthorityList) -> Option { if Self::invalid_authority_list(&initial) { - return None + return None; } Some(AuthoritySet { @@ -204,7 +204,7 @@ where authority_set_changes: AuthoritySetChanges, ) -> Option { if Self::invalid_authority_list(&authorities) { - return None + return None; } Some(AuthoritySet { @@ -230,8 +230,8 @@ where F: Fn(&H, &H) -> Result, { let filter = |node_hash: &H, node_num: &N, _: &PendingChange| { - if number >= *node_num && - (is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash) + if number >= *node_num + && (is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash) { // Continue the search in this subtree. FilterAction::KeepNode @@ -278,7 +278,7 @@ where for change in &self.pending_forced_changes { if is_descendent_of(&change.canon_hash, best_hash)? { forced = Some((change.canon_hash.clone(), change.canon_height.clone())); - break + break; } } @@ -286,13 +286,14 @@ where for (_, _, change) in self.pending_standard_changes.roots() { if is_descendent_of(&change.canon_hash, best_hash)? 
{ standard = Some((change.canon_hash.clone(), change.canon_height.clone())); - break + break; } } let earliest = match (forced, standard) { - (Some(forced), Some(standard)) => - Some(if forced.1 < standard.1 { forced } else { standard }), + (Some(forced), Some(standard)) => { + Some(if forced.1 < standard.1 { forced } else { standard }) + }, (Some(forced), None) => Some(forced), (None, Some(standard)) => Some(standard), (None, None) => None, @@ -344,11 +345,11 @@ where { for change in &self.pending_forced_changes { if change.canon_hash == pending.canon_hash { - return Err(Error::DuplicateAuthoritySetChange) + return Err(Error::DuplicateAuthoritySetChange); } if is_descendent_of(&change.canon_hash, &pending.canon_hash)? { - return Err(Error::MultiplePendingForcedAuthoritySetChanges) + return Err(Error::MultiplePendingForcedAuthoritySetChanges); } } @@ -391,7 +392,7 @@ where E: std::error::Error, { if Self::invalid_authority_list(&pending.next_authorities) { - return Err(Error::InvalidAuthoritySet) + return Err(Error::InvalidAuthoritySet); } match pending.delay_kind { @@ -472,8 +473,8 @@ where // check if there's any pending standard change that we depend on for (_, _, standard_change) in self.pending_standard_changes.roots() { - if standard_change.effective_number() <= median_last_finalized && - is_descendent_of(&standard_change.canon_hash, &change.canon_hash)? + if standard_change.effective_number() <= median_last_finalized + && is_descendent_of(&standard_change.canon_hash, &change.canon_hash)? { log::info!(target: "afg", "Not applying authority set change forced at block #{:?}, due to pending standard change at block #{:?}", @@ -483,7 +484,7 @@ where return Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied( standard_change.effective_number(), - )) + )); } } @@ -515,7 +516,7 @@ where }, )); - break + break; } } @@ -562,8 +563,8 @@ where // we will keep all forced changes for any later blocks and that are a // descendent of the finalized block (i.e. 
they are part of this branch). for change in pending_forced_changes { - if change.effective_number() > finalized_number && - is_descendent_of(&finalized_hash, &change.canon_hash)? + if change.effective_number() > finalized_number + && is_descendent_of(&finalized_hash, &change.canon_hash)? { self.pending_forced_changes.push(change) } @@ -721,7 +722,7 @@ impl AuthoritySetChanges { .map(|last_auth_change| last_auth_change.1 < block_number) .unwrap_or(false) { - return AuthoritySetChangeId::Latest + return AuthoritySetChangeId::Latest; } let idx = self @@ -734,7 +735,7 @@ impl AuthoritySetChanges { // if this is the first index but not the first set id then we are missing data. if idx == 0 && set_id != 0 { - return AuthoritySetChangeId::Unknown + return AuthoritySetChangeId::Unknown; } AuthoritySetChangeId::Set(set_id, block_number) @@ -771,7 +772,7 @@ impl AuthoritySetChanges { // if this is the first index but not the first set id then we are missing data. if idx == 0 && set_id != 0 { - return None + return None; } } diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index 235453ea35df1..24c541261ceb2 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -210,7 +210,7 @@ where backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; - return Ok(Some((new_set, set_state))) + return Ok(Some((new_set, set_state))); } Ok(None) @@ -274,7 +274,7 @@ where backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; - return Ok(Some((set, set_state))) + return Ok(Some((set, set_state))); } Ok(None) @@ -307,7 +307,7 @@ where }, }; - return Ok(Some((new_set, set_state))) + return Ok(Some((new_set, set_state))); } Ok(None) @@ -336,7 +336,7 @@ where return Ok(PersistentData { authority_set: new_set.into(), set_state: set_state.into(), - }) + }); } }, Some(1) => { @@ -346,7 +346,7 @@ where return Ok(PersistentData { authority_set: new_set.into(), 
set_state: set_state.into(), - }) + }); } }, Some(2) => { @@ -356,7 +356,7 @@ where return Ok(PersistentData { authority_set: new_set.into(), set_state: set_state.into(), - }) + }); } }, Some(3) => { @@ -376,11 +376,18 @@ where }, }; - return Ok(PersistentData { authority_set: set.into(), set_state: set_state.into() }) + return Ok(PersistentData { + authority_set: set.into(), + set_state: set_state.into(), + }); } }, - Some(other) => - return Err(ClientError::Backend(format!("Unsupported GRANDPA DB version: {:?}", other))), + Some(other) => { + return Err(ClientError::Backend(format!( + "Unsupported GRANDPA DB version: {:?}", + other + ))) + }, } // genesis. diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 218b4b668c10f..3c304b9ceadad 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -166,18 +166,18 @@ impl View { fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { // only from current set if set_id < self.set_id { - return Consider::RejectPast + return Consider::RejectPast; } if set_id > self.set_id { - return Consider::RejectFuture + return Consider::RejectFuture; } // only r-1 ... r+1 if round.0 > self.round.0.saturating_add(1) { - return Consider::RejectFuture + return Consider::RejectFuture; } if round.0 < self.round.0.saturating_sub(1) { - return Consider::RejectPast + return Consider::RejectPast; } Consider::Accept @@ -188,22 +188,23 @@ impl View { fn consider_global(&self, set_id: SetId, number: N) -> Consider { // only from current set if set_id < self.set_id { - return Consider::RejectPast + return Consider::RejectPast; } if set_id > self.set_id { - return Consider::RejectFuture + return Consider::RejectFuture; } // only commits which claim to prove a higher block number than // the one we're aware of. 
match self.last_commit { None => Consider::Accept, - Some(ref num) => + Some(ref num) => { if num < &number { Consider::Accept } else { Consider::RejectPast - }, + } + }, } } } @@ -551,22 +552,22 @@ impl Peers { Some(p) => p, }; - let invalid_change = peer.view.set_id > update.set_id || - peer.view.round > update.round && peer.view.set_id == update.set_id || - peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height); + let invalid_change = peer.view.set_id > update.set_id + || peer.view.round > update.round && peer.view.set_id == update.set_id + || peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height); if invalid_change { - return Err(Misbehavior::InvalidViewChange) + return Err(Misbehavior::InvalidViewChange); } let now = Instant::now(); - let duplicate_packet = (update.set_id, update.round, Some(&update.commit_finalized_height)) == - (peer.view.set_id, peer.view.round, peer.view.last_commit.as_ref()); + let duplicate_packet = (update.set_id, update.round, Some(&update.commit_finalized_height)) + == (peer.view.set_id, peer.view.round, peer.view.last_commit.as_ref()); if duplicate_packet { if let Some(last_update) = peer.view.last_update { if now < last_update + self.neighbor_rebroadcast_period / 2 { - return Err(Misbehavior::DuplicateNeighborMessage) + return Err(Misbehavior::DuplicateNeighborMessage); } } } @@ -594,7 +595,7 @@ impl Peers { // same height, because there is still a misbehavior condition based on // sending commits that are <= the best we are aware of. 
if peer.view.last_commit.as_ref() > Some(&new_height) { - return Err(Misbehavior::InvalidViewChange) + return Err(Misbehavior::InvalidViewChange); } peer.view.last_commit = Some(new_height); @@ -644,7 +645,7 @@ impl Peers { } else if n_authorities_added < one_and_a_half_lucky { second_stage_peers.insert(*peer_id); } else { - break + break; } } @@ -653,7 +654,7 @@ impl Peers { let n_second_stage_peers = LUCKY_PEERS.max((shuffled_peers.len() as f32).sqrt() as usize); for (peer_id, info) in &shuffled_peers { if info.roles.is_light() { - continue + continue; } if first_stage_peers.len() < LUCKY_PEERS { @@ -664,7 +665,7 @@ impl Peers { second_stage_peers.insert(*peer_id); } } else { - break + break; } } @@ -789,14 +790,15 @@ impl Inner { { let local_view = match self.local_view { None => return None, - Some(ref mut v) => + Some(ref mut v) => { if v.round == round { // Do not send neighbor packets out if `round` has not changed --- // such behavior is punishable. - return None + return None; } else { v - }, + } + }, }; let set_id = local_view.set_id; @@ -818,10 +820,10 @@ impl Inner { { let local_view = match self.local_view { ref mut x @ None => x.get_or_insert(LocalView::new(set_id, Round(1))), - Some(ref mut v) => + Some(ref mut v) => { if v.set_id == set_id { - let diff_authorities = self.authorities.iter().collect::>() != - authorities.iter().collect::>(); + let diff_authorities = self.authorities.iter().collect::>() + != authorities.iter().collect::>(); if diff_authorities { debug!(target: "afg", @@ -833,10 +835,11 @@ impl Inner { } // Do not send neighbor packets out if the `set_id` has not changed --- // such behavior is punishable. 
- return None + return None; } else { v - }, + } + }, }; local_view.update_set(set_id); @@ -858,12 +861,13 @@ impl Inner { { match self.local_view { None => return None, - Some(ref mut v) => + Some(ref mut v) => { if v.last_commit_height() < Some(&finalized) { v.last_commit = Some((finalized, round, set_id)); } else { - return None - }, + return None; + } + }, }; } @@ -903,10 +907,12 @@ impl Inner { ) -> Action { match self.consider_vote(full.round, full.set_id) { Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), - Consider::RejectOutOfScope => - return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), - Consider::RejectPast => - return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)), + Consider::RejectOutOfScope => { + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()) + }, + Consider::RejectPast => { + return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)) + }, Consider::Accept => {}, } @@ -919,7 +925,7 @@ impl Inner { "afg.bad_msg_signature"; "signature" => ?full.message.id, ); - return Action::Discard(cost::UNKNOWN_VOTER) + return Action::Discard(cost::UNKNOWN_VOTER); } if !sp_finality_grandpa::check_message_signature( @@ -936,7 +942,7 @@ impl Inner { "afg.bad_msg_signature"; "signature" => ?full.message.id, ); - return Action::Discard(cost::BAD_SIGNATURE) + return Action::Discard(cost::BAD_SIGNATURE); } let topic = super::round_topic::(full.round.0, full.set_id.0); @@ -949,20 +955,22 @@ impl Inner { full: &FullCommitMessage, ) -> Action { if let Err(misbehavior) = self.peers.update_commit_height(who, full.message.target_number) { - return Action::Discard(misbehavior.cost()) + return Action::Discard(misbehavior.cost()); } match self.consider_global(full.set_id, full.message.target_number) { Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), - Consider::RejectPast => - return Action::Discard(self.cost_past_rejection(who, 
full.round, full.set_id)), - Consider::RejectOutOfScope => - return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), + Consider::RejectPast => { + return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)) + }, + Consider::RejectOutOfScope => { + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()) + }, Consider::Accept => {}, } - if full.message.precommits.len() != full.message.auth_data.len() || - full.message.precommits.is_empty() + if full.message.precommits.len() != full.message.auth_data.len() + || full.message.precommits.is_empty() { debug!(target: "afg", "Malformed compact commit"); telemetry!( @@ -973,7 +981,7 @@ impl Inner { "auth_data_len" => ?full.message.auth_data.len(), "precommits_is_empty" => ?full.message.precommits.is_empty(), ); - return Action::Discard(cost::MALFORMED_COMMIT) + return Action::Discard(cost::MALFORMED_COMMIT); } // always discard commits initially and rebroadcast after doing full @@ -990,19 +998,19 @@ impl Inner { match &self.pending_catch_up { PendingCatchUp::Requesting { who: peer, request, instant } => { if peer != who { - return Action::Discard(Misbehavior::OutOfScopeMessage.cost()) + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()); } if request.set_id != full.set_id { - return Action::Discard(cost::MALFORMED_CATCH_UP) + return Action::Discard(cost::MALFORMED_CATCH_UP); } if request.round.0 > full.message.round_number { - return Action::Discard(cost::MALFORMED_CATCH_UP) + return Action::Discard(cost::MALFORMED_CATCH_UP); } if full.message.prevotes.is_empty() || full.message.precommits.is_empty() { - return Action::Discard(cost::MALFORMED_CATCH_UP) + return Action::Discard(cost::MALFORMED_CATCH_UP); } // move request to pending processing state, we won't push out @@ -1046,25 +1054,26 @@ impl Inner { // race where the peer sent us the request before it observed that // we had transitioned to a new set. In this case we charge a lower // cost. 
- if request.set_id.0.saturating_add(1) == local_view.set_id.0 && - local_view.round.0.saturating_sub(CATCH_UP_THRESHOLD) == 0 + if request.set_id.0.saturating_add(1) == local_view.set_id.0 + && local_view.round.0.saturating_sub(CATCH_UP_THRESHOLD) == 0 { - return (None, Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP)) + return (None, Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP)); } - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())); } match self.peers.peer(who) { None => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), - Some(peer) if peer.view.round >= request.round => - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), + Some(peer) if peer.view.round >= request.round => { + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) + }, _ => {}, } let last_completed_round = set_state.read().last_completed_round(); if last_completed_round.number < request.round.0 { - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())); } trace!(target: "afg", "Replying to catch-up request for round {} from {} with round {}", @@ -1129,9 +1138,9 @@ impl Inner { // won't be able to reply since they don't follow the full GRANDPA // protocol and therefore might not have the vote data available. 
if let (Some(peer), Some(local_view)) = (self.peers.peer(who), &self.local_view) { - if self.catch_up_config.request_allowed(peer) && - peer.view.set_id == local_view.set_id && - peer.view.round.0.saturating_sub(CATCH_UP_THRESHOLD) > local_view.round.0 + if self.catch_up_config.request_allowed(peer) + && peer.view.set_id == local_view.set_id + && peer.view.round.0.saturating_sub(CATCH_UP_THRESHOLD) > local_view.round.0 { // send catch up request if allowed let round = peer.view.round.0 - 1; // peer.view.round is > 0 @@ -1164,8 +1173,9 @@ impl Inner { let update_res = self.peers.update_peer_state(who, update); let (cost_benefit, topics) = match update_res { - Ok(view) => - (benefit::NEIGHBOR_MESSAGE, view.map(|view| neighbor_topics::(view))), + Ok(view) => { + (benefit::NEIGHBOR_MESSAGE, view.map(|view| neighbor_topics::(view))) + }, Err(misbehavior) => (misbehavior.cost(), None), }; @@ -1218,7 +1228,7 @@ impl Inner { let report = match &self.pending_catch_up { PendingCatchUp::Requesting { who: peer, instant, .. } => { if instant.elapsed() <= CATCH_UP_REQUEST_TIMEOUT { - return (false, None) + return (false, None); } else { // report peer for timeout Some((*peer, cost::CATCH_UP_REQUEST_TIMEOUT)) @@ -1226,7 +1236,7 @@ impl Inner { }, PendingCatchUp::Processing { instant, .. 
} => { if instant.elapsed() < CATCH_UP_PROCESS_TIMEOUT { - return (false, None) + return (false, None); } else { None } @@ -1263,8 +1273,8 @@ impl Inner { if round_elapsed < round_duration.mul_f32(PROPAGATION_SOME) { self.peers.first_stage_peers.contains(who) } else if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { - self.peers.first_stage_peers.contains(who) || - self.peers.second_stage_peers.contains(who) + self.peers.first_stage_peers.contains(who) + || self.peers.second_stage_peers.contains(who) } else { self.peers.peer(who).map(|info| !info.roles.is_light()).unwrap_or(false) } @@ -1292,9 +1302,9 @@ impl Inner { }; if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { - self.peers.first_stage_peers.contains(who) || - self.peers.second_stage_peers.contains(who) || - self.peers.lucky_light_peers.contains(who) + self.peers.first_stage_peers.contains(who) + || self.peers.second_stage_peers.contains(who) + || self.peers.lucky_light_peers.contains(who) } else { true } @@ -1582,7 +1592,7 @@ impl sc_network_gossip::Validator for GossipValidator sc_network_gossip::Validator for GossipValidator sc_network_gossip::Validator for GossipValidator false, Ok(GossipMessage::CatchUpRequest(_)) => false, @@ -1663,8 +1673,10 @@ impl sc_network_gossip::Validator for GossipValidator // we expire any commit message that doesn't target the same block // as our best commit or isn't from the same round and set id - !(full.message.target_number == number && - full.round == round && full.set_id == set_id), + { + !(full.message.target_number == number + && full.round == round && full.set_id == set_id) + }, None => true, }, Ok(_) => true, @@ -2340,8 +2352,8 @@ mod tests { let test = |rounds_elapsed, peers| { // rewind n round durations - val.inner.write().local_view.as_mut().unwrap().round_start = Instant::now() - - Duration::from_millis( + val.inner.write().local_view.as_mut().unwrap().round_start = Instant::now() + - Duration::from_millis( (round_duration.as_millis() as 
f32 * rounds_elapsed) as u64, ); diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 75a7697812c6c..ba1bdf21bcc05 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -347,7 +347,7 @@ impl> NetworkBridge { // check signature. if !voters.contains(&msg.message.id) { debug!(target: "afg", "Skipping message from unknown voter {}", msg.message.id); - return future::ready(None) + return future::ready(None); } if voters.len().get() <= TELEMETRY_VOTERS_LIMIT { @@ -481,10 +481,11 @@ impl> Future for NetworkBridge { Poll::Ready(Some((to, packet))) => { self.gossip_engine.lock().send_message(to, packet.encode()); }, - Poll::Ready(None) => + Poll::Ready(None) => { return Poll::Ready(Err(Error::Network( "Neighbor packet worker stream closed.".into(), - ))), + ))) + }, Poll::Pending => break, } } @@ -494,17 +495,19 @@ impl> Future for NetworkBridge { Poll::Ready(Some(PeerReport { who, cost_benefit })) => { self.gossip_engine.lock().report(who, cost_benefit); }, - Poll::Ready(None) => + Poll::Ready(None) => { return Poll::Ready(Err(Error::Network( "Gossip validator report stream closed.".into(), - ))), + ))) + }, Poll::Pending => break, } } match self.gossip_engine.lock().poll_unpin(cx) { - Poll::Ready(()) => - return Poll::Ready(Err(Error::Network("Gossip engine future finished.".into()))), + Poll::Ready(()) => { + return Poll::Ready(Err(Error::Network("Gossip engine future finished.".into()))) + }, Poll::Pending => {}, } @@ -552,7 +555,7 @@ fn incoming_global( gossip_engine.lock().report(who, cost); } - return None + return None; } let round = msg.round; @@ -604,7 +607,7 @@ fn incoming_global( gossip_engine.lock().report(who, cost); } - return None + return None; } let cb = move |outcome| { @@ -637,10 +640,12 @@ fn incoming_global( }) .filter_map(move |(notification, msg)| { future::ready(match msg { - GossipMessage::Commit(msg) => - 
process_commit(msg, notification, &gossip_engine, &gossip_validator, &voters), - GossipMessage::CatchUp(msg) => - process_catch_up(msg, notification, &gossip_engine, &gossip_validator, &voters), + GossipMessage::Commit(msg) => { + process_commit(msg, notification, &gossip_engine, &gossip_validator, &voters) + }, + GossipMessage::CatchUp(msg) => { + process_catch_up(msg, notification, &gossip_engine, &gossip_validator, &voters) + }, _ => { debug!(target: "afg", "Skipping unknown message type"); None @@ -772,7 +777,7 @@ impl Sink> for OutgoingMessages { // forward the message to the inner sender. return self.sender.start_send(signed).map_err(|e| { Error::Network(format!("Failed to start_send on channel sender: {:?}", e)) - }) + }); }; Ok(()) @@ -810,16 +815,16 @@ fn check_compact_commit( if let Some(weight) = voters.get(id).map(|info| info.weight()) { total_weight += weight.get(); if total_weight > full_threshold { - return Err(cost::MALFORMED_COMMIT) + return Err(cost::MALFORMED_COMMIT); } } else { debug!(target: "afg", "Skipping commit containing unknown voter {}", id); - return Err(cost::MALFORMED_COMMIT) + return Err(cost::MALFORMED_COMMIT); } } if total_weight < voters.threshold().get() { - return Err(cost::MALFORMED_COMMIT) + return Err(cost::MALFORMED_COMMIT); } // check signatures on all contained precommits. 
@@ -852,7 +857,7 @@ fn check_compact_commit( } .cost(); - return Err(cost) + return Err(cost); } } @@ -883,16 +888,16 @@ fn check_catch_up( if let Some(weight) = voters.get(id).map(|info| info.weight()) { total_weight += weight.get(); if total_weight > full_threshold { - return Err(cost::MALFORMED_CATCH_UP) + return Err(cost::MALFORMED_CATCH_UP); } } else { debug!(target: "afg", "Skipping catch up message containing unknown voter {}", id); - return Err(cost::MALFORMED_CATCH_UP) + return Err(cost::MALFORMED_CATCH_UP); } } if total_weight < voters.threshold().get() { - return Err(cost::MALFORMED_CATCH_UP) + return Err(cost::MALFORMED_CATCH_UP); } Ok(()) @@ -935,7 +940,7 @@ fn check_catch_up( } .cost(); - return Err(cost) + return Err(cost); } } @@ -1018,7 +1023,7 @@ impl Sink<(RoundNumber, Commit)> for CommitsOut), ) -> Result<(), Self::Error> { if !self.is_voter { - return Ok(()) + return Ok(()); } let (round, commit) = input; diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index c001796b5ca5d..31b7c23377deb 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ b/client/finality-grandpa/src/communication/periodic.rs @@ -89,7 +89,7 @@ impl Stream for NeighborPacketWorker { this.delay.reset(this.rebroadcast_period); this.last = Some((to.clone(), packet.clone())); - return Poll::Ready(Some((to, GossipMessage::::from(packet)))) + return Poll::Ready(Some((to, GossipMessage::::from(packet)))); }, // Don't return yet, maybe the timer fired. 
Poll::Pending => {}, @@ -108,7 +108,7 @@ impl Stream for NeighborPacketWorker { while let Poll::Ready(()) = this.delay.poll_unpin(cx) {} if let Some((ref to, ref packet)) = this.last { - return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))) + return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))); } Poll::Pending diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index eab7bb2df50cf..1909c96c84781 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -201,10 +201,11 @@ impl Tester { futures::future::poll_fn(move |cx| loop { match Stream::poll_next(Pin::new(&mut s.as_mut().unwrap().events), cx) { Poll::Ready(None) => panic!("concluded early"), - Poll::Ready(Some(item)) => + Poll::Ready(Some(item)) => { if pred(item) { - return Poll::Ready(s.take().unwrap()) - }, + return Poll::Ready(s.take().unwrap()); + } + }, Poll::Pending => return Poll::Pending, } }) @@ -546,8 +547,9 @@ fn bad_commit_leads_to_report() { let fut = future::join(send_message, handle_commit) .then(move |(tester, ())| { tester.filter_network_events(move |event| match event { - Event::Report(who, cost_benefit) => - who == id && cost_benefit == super::cost::INVALID_COMMIT, + Event::Report(who, cost_benefit) => { + who == id && cost_benefit == super::cost::INVALID_COMMIT + }, _ => false, }) }) diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index f235c3a86c04e..40d3b3efb0b61 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -281,8 +281,8 @@ impl HasVoted
{ pub fn propose(&self) -> Option<&PrimaryPropose
> { match self { HasVoted::Yes(_, Vote::Propose(propose)) => Some(propose), - HasVoted::Yes(_, Vote::Prevote(propose, _)) | - HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => propose.as_ref(), + HasVoted::Yes(_, Vote::Prevote(propose, _)) + | HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => propose.as_ref(), _ => None, } } @@ -290,8 +290,8 @@ impl HasVoted
{ /// Returns the prevote we should vote with (if any.) pub fn prevote(&self) -> Option<&Prevote
> { match self { - HasVoted::Yes(_, Vote::Prevote(_, prevote)) | - HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => Some(prevote), + HasVoted::Yes(_, Vote::Prevote(_, prevote)) + | HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => Some(prevote), _ => None, } } @@ -494,7 +494,7 @@ where if *equivocation.offender() == local_id { return Err(Error::Safety( "Refraining from sending equivocation report for our own equivocation.".into(), - )) + )); } } @@ -517,8 +517,9 @@ where // find the hash of the latest block in the current set let current_set_latest_hash = match next_change { - Some((_, n)) if n.is_zero() => - return Err(Error::Safety("Authority set change signalled at genesis.".to_string())), + Some((_, n)) if n.is_zero() => { + return Err(Error::Safety("Authority set change signalled at genesis.".to_string())) + }, // the next set starts at `n` so the current one lasts until `n - 1`. if // `n` is later than the best block, then the current set is still live // at best block. @@ -552,7 +553,7 @@ where Some(proof) => proof, None => { debug!(target: "afg", "Equivocation offender is not part of the authority set."); - return Ok(()) + return Ok(()); }, }; @@ -601,7 +602,7 @@ where Client: HeaderMetadata, { if base == block { - return Err(GrandpaError::NotDescendent) + return Err(GrandpaError::NotDescendent); } let tree_route_res = sp_blockchain::tree_route(&**client, block, base); @@ -612,12 +613,12 @@ where debug!(target: "afg", "Encountered error computing ancestry between block {:?} and base {:?}: {}", block, base, e); - return Err(GrandpaError::NotDescendent) + return Err(GrandpaError::NotDescendent); }, }; if tree_route.common_block().hash != base { - return Err(GrandpaError::NotDescendent) + return Err(GrandpaError::NotDescendent); } // skip one because our ancestry is meant to start from the parent of `block`, @@ -688,7 +689,7 @@ where // before activating the new set. the `authority_set` is updated immediately thus // we restrict the voter based on that. 
if set_id != authority_set.set_id() { - return Ok(None) + return Ok(None); } best_chain_containing(block, client, authority_set, select_chain, voting_rule) @@ -707,12 +708,13 @@ where let local_id = local_authority_id(&self.voters, self.config.keystore.as_ref()); let has_voted = match self.voter_set_state.has_voted(round) { - HasVoted::Yes(id, vote) => + HasVoted::Yes(id, vote) => { if local_id.as_ref().map(|k| k == &id).unwrap_or(false) { HasVoted::Yes(id, vote) } else { HasVoted::No - }, + } + }, HasVoted::No => HasVoted::No, }; @@ -788,7 +790,7 @@ where // we've already proposed in this round (in a previous run), // ignore the given vote and don't update the voter set // state - return Ok(None) + return Ok(None); } let mut current_rounds = current_rounds.clone(); @@ -846,7 +848,7 @@ where // we've already prevoted in this round (in a previous run), // ignore the given vote and don't update the voter set // state - return Ok(None) + return Ok(None); } // report to telemetry and prometheus @@ -909,7 +911,7 @@ where // we've already precommitted in this round (in a previous run), // ignore the given vote and don't update the voter set // state - return Ok(None) + return Ok(None); } // report to telemetry and prometheus @@ -920,7 +922,7 @@ where HasVoted::Yes(_, Vote::Prevote(_, prevote)) => prevote, _ => { let msg = "Voter precommitting before prevoting."; - return Err(Error::Safety(msg.to_string())) + return Err(Error::Safety(msg.to_string())); }, }; @@ -972,7 +974,7 @@ where (completed_rounds, current_rounds) } else { let msg = "Voter acting while in paused state."; - return Err(Error::Safety(msg.to_string())) + return Err(Error::Safety(msg.to_string())); }; let mut completed_rounds = completed_rounds.clone(); @@ -1032,7 +1034,7 @@ where (completed_rounds, current_rounds) } else { let msg = "Voter acting while in paused state."; - return Err(Error::Safety(msg.to_string())) + return Err(Error::Safety(msg.to_string())); }; let mut completed_rounds = 
completed_rounds.clone(); @@ -1163,7 +1165,7 @@ where block, ); - return Ok(None) + return Ok(None); }, }; @@ -1197,7 +1199,7 @@ where } if *target_header.number() == target_number { - break + break; } target_header = client @@ -1228,8 +1230,8 @@ where .await .filter(|(_, restricted_number)| { // we can only restrict votes within the interval [base, target] - restricted_number >= base_header.number() && - restricted_number < target_header.number() + restricted_number >= base_header.number() + && restricted_number < target_header.number() }) .or_else(|| Some((target_header.hash(), *target_header.number()))) }, @@ -1279,7 +1281,7 @@ where status.finalized_number, ); - return Ok(()) + return Ok(()); } // FIXME #1483: clone only when changed @@ -1327,10 +1329,10 @@ where if !justification_required { if let Some(justification_period) = justification_period { let last_finalized_number = client.info().finalized_number; - justification_required = (!last_finalized_number.is_zero() || - number - last_finalized_number == justification_period) && - (last_finalized_number / justification_period != - number / justification_period); + justification_required = (!last_finalized_number.is_zero() + || number - last_finalized_number == justification_period) + && (last_finalized_number / justification_period + != number / justification_period); } } @@ -1414,7 +1416,7 @@ where warn!(target: "afg", "Failed to write updated authority set to disk. 
Bailing."); warn!(target: "afg", "Node is in a potentially inconsistent state."); - return Err(e.into()) + return Err(e.into()); } } diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 453b41bc63468..fc4799aa28856 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -111,7 +111,7 @@ where { changes } else { - return Ok(None) + return Ok(None); }; prove_finality(&*self.backend, authority_set_changes, block) @@ -164,7 +164,7 @@ where block, info.finalized_number, ); trace!(target: "afg", "{}", &err); - return Err(FinalityProofError::BlockNotYetFinalized) + return Err(FinalityProofError::BlockNotYetFinalized); } let (justification, just_block) = match authority_set_changes.get_set_id(block) { @@ -179,7 +179,7 @@ where "No justification found for the latest finalized block. \ Returning empty proof.", ); - return Ok(None) + return Ok(None); } }, AuthoritySetChangeId::Set(_, last_block_for_set) => { @@ -199,7 +199,7 @@ where Returning empty proof.", block, ); - return Ok(None) + return Ok(None); }; (justification, last_block_for_set) }, @@ -210,7 +210,7 @@ where You need to resync to populate AuthoritySetChanges properly.", block, ); - return Err(FinalityProofError::BlockNotInAuthoritySetChanges) + return Err(FinalityProofError::BlockNotInAuthoritySetChanges); }, }; @@ -220,7 +220,7 @@ where let mut current = block + One::one(); loop { if current > just_block || headers.len() >= MAX_UNKNOWN_HEADERS { - break + break; } headers.push(backend.blockchain().expect_header(BlockId::Number(current))?); current += One::one(); diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 3715287eea31f..17cd860b01082 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -105,9 +105,9 @@ where self.authority_set.inner().pending_changes().cloned().collect(); for pending_change in 
pending_changes { - if pending_change.delay_kind == DelayKind::Finalized && - pending_change.effective_number() > chain_info.finalized_number && - pending_change.effective_number() <= chain_info.best_number + if pending_change.delay_kind == DelayKind::Finalized + && pending_change.effective_number() > chain_info.finalized_number + && pending_change.effective_number() <= chain_info.best_number { let effective_block_hash = if !pending_change.delay.is_zero() { self.select_chain @@ -243,7 +243,7 @@ where ) -> Option>> { // check for forced authority set hard forks if let Some(change) = self.authority_set_hard_forks.get(&hash) { - return Some(change.clone()) + return Some(change.clone()); } // check for forced change. @@ -254,7 +254,7 @@ where canon_height: *header.number(), canon_hash: hash, delay_kind: DelayKind::Best { median_last_finalized }, - }) + }); } // check normal scheduled change. @@ -445,7 +445,7 @@ where self.inner.storage(hash, &sc_client_api::StorageKey(k.to_vec())) { if let Ok(id) = SetId::decode(&mut id.0.as_ref()) { - return Ok(id) + return Ok(id); } } } @@ -539,14 +539,14 @@ where Ok(BlockStatus::InChain) => { // Strip justifications when re-importing an existing block. let _justifications = block.justifications.take(); - return (&*self.inner).import_block(block, new_cache).await + return (&*self.inner).import_block(block, new_cache).await; }, Ok(BlockStatus::Unknown) => {}, Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), } if block.with_state() { - return self.import_state(block, new_cache).await + return self.import_state(block, new_cache).await; } if number <= self.inner.info().finalized_number { @@ -557,7 +557,7 @@ where "Justification required when importing \ an old block with authority set change." 
.into(), - )) + )); } assert!(block.justifications.is_some()); let mut authority_set = self.authority_set.inner_locked(); @@ -572,7 +572,7 @@ where }, ); } - return (&*self.inner).import_block(block, new_cache).await + return (&*self.inner).import_block(block, new_cache).await; } // on initial sync we will restrict logging under info to avoid spam. @@ -594,7 +594,7 @@ where r, ); pending_changes.revert(); - return Ok(r) + return Ok(r); }, Err(e) => { debug!( @@ -603,7 +603,7 @@ where e, ); pending_changes.revert(); - return Err(ConsensusError::ClientImport(e.to_string())) + return Err(ConsensusError::ClientImport(e.to_string())); }, } }; @@ -675,7 +675,7 @@ where } }); }, - None => + None => { if needs_justification { debug!( target: "afg", @@ -684,7 +684,8 @@ where ); imported_aux.needs_justification = true; - }, + } + }, } Ok(ImportResult::Imported(imported_aux)) @@ -774,7 +775,7 @@ where // justification import pipeline similar to what we do for `BlockImport`. In the // meantime we'll just drop the justification, since this is only used for BEEFY which // is still WIP. 
- return Ok(()) + return Ok(()); } let justification = GrandpaJustification::decode_and_verify_finalizes( @@ -814,7 +815,7 @@ where // send the command to the voter let _ = self.send_voter_commands.unbounded_send(command); }, - Err(CommandOrError::Error(e)) => + Err(CommandOrError::Error(e)) => { return Err(match e { Error::Grandpa(error) => ConsensusError::ClientImport(error.to_string()), Error::Network(error) => ConsensusError::ClientImport(error), @@ -824,7 +825,8 @@ where Error::Signing(error) => ConsensusError::ClientImport(error), Error::Timer(error) => ConsensusError::ClientImport(error.to_string()), Error::RuntimeApi(error) => ConsensusError::ClientImport(error.to_string()), - }), + }) + }, Ok(_) => { assert!( !enacts_change, diff --git a/client/finality-grandpa/src/justification.rs b/client/finality-grandpa/src/justification.rs index 56b26c964ce9b..41ae4bf1aa7e8 100644 --- a/client/finality-grandpa/src/justification.rs +++ b/client/finality-grandpa/src/justification.rs @@ -101,7 +101,7 @@ impl GrandpaJustification { let mut current_hash = signed.precommit.target_hash; loop { if current_hash == base_hash { - break + break; } match client.header(BlockId::Hash(current_hash))? { @@ -110,7 +110,7 @@ impl GrandpaJustification { // as base and only traverse backwards from the other blocks // in the commit. but better be safe to avoid an unbound loop. 
if *current_header.number() <= base_number { - return error() + return error(); } let parent_hash = *current_header.parent_hash(); @@ -183,7 +183,7 @@ impl GrandpaJustification { Ok(ref result) if result.is_valid() => {}, _ => { let msg = "invalid commit in grandpa justification".to_string(); - return Err(ClientError::BadJustification(msg)) + return Err(ClientError::BadJustification(msg)); }, } @@ -218,11 +218,11 @@ impl GrandpaJustification { ) { return Err(ClientError::BadJustification( "invalid signature for precommit in grandpa justification".to_string(), - )) + )); } if base_hash == signed.precommit.target_hash { - continue + continue; } match ancestry_chain.ancestry(base_hash, signed.precommit.target_hash) { @@ -234,10 +234,11 @@ impl GrandpaJustification { visited_hashes.insert(hash); } }, - _ => + _ => { return Err(ClientError::BadJustification( "invalid precommit ancestry proof in grandpa justification".to_string(), - )), + )) + }, } } @@ -252,7 +253,7 @@ impl GrandpaJustification { return Err(ClientError::BadJustification( "invalid precommit ancestries in grandpa justification with unused headers" .to_string(), - )) + )); } Ok(()) @@ -293,7 +294,7 @@ where let mut current_hash = block; loop { if current_hash == base { - break + break; } match self.ancestry.get(¤t_hash) { Some(current_header) => { diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index a7326d57c2bf0..843d09cbd562e 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -1082,11 +1082,11 @@ where // voters don't conclude naturally return Poll::Ready(Err(Error::Safety( "finality-grandpa inner voter has concluded.".into(), - ))) + ))); }, Poll::Ready(Err(CommandOrError::Error(e))) => { // return inner observer error - return Poll::Ready(Err(e)) + return Poll::Ready(Err(e)); }, Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { // some command issued internally @@ -1099,7 +1099,7 @@ where Poll::Pending => {}, 
Poll::Ready(None) => { // the `voter_commands_rx` stream should never conclude since it's never closed. - return Poll::Ready(Err(Error::Safety("`voter_commands_rx` was closed.".into()))) + return Poll::Ready(Err(Error::Safety("`voter_commands_rx` was closed.".into()))); }, Poll::Ready(Some(command)) => { // some command issued externally @@ -1142,7 +1142,7 @@ where let revertible = blocks.min(best_number - finalized); if revertible == Zero::zero() { - return Ok(()) + return Ok(()); } let number = best_number - revertible; diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 9bcb03c0555c2..a6be7bfa67d75 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -97,14 +97,14 @@ where }, voter::CommunicationIn::CatchUp(..) => { // ignore catch up messages - return future::ok(last_finalized_number) + return future::ok(last_finalized_number); }, }; // if the commit we've received targets a block lower or equal to the last // finalized, ignore it and continue with the current state if commit.target_number <= last_finalized_number { - return future::ok(last_finalized_number) + return future::ok(last_finalized_number); } let validation_result = match finality_grandpa::validate_commit( @@ -363,11 +363,11 @@ where Poll::Ready(Ok(())) => { // observer commit stream doesn't conclude naturally; this could reasonably be an // error. - return Poll::Ready(Ok(())) + return Poll::Ready(Ok(())); }, Poll::Ready(Err(CommandOrError::Error(e))) => { // return inner observer error - return Poll::Ready(Err(e)) + return Poll::Ready(Err(e)); }, Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { // some command issued internally @@ -380,7 +380,7 @@ where Poll::Pending => {}, Poll::Ready(None) => { // the `voter_commands_rx` stream should never conclude since it's never closed. 
- return Poll::Ready(Ok(())) + return Poll::Ready(Ok(())); }, Poll::Ready(Some(command)) => { // some command issued externally diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 93d20110ff5af..9ac18df64608d 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1118,8 +1118,8 @@ fn voter_persists_its_votes() { Pin::new(&mut *round_tx.lock()) .start_send(finality_grandpa::Message::Prevote(prevote)) .unwrap(); - } else if state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap() == - 1 + } else if state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap() + == 1 { // the next message we receive should be our own prevote let prevote = match signed.message { @@ -1133,8 +1133,8 @@ fn voter_persists_its_votes() { // after alice restarts it should send its previous prevote // therefore we won't ever receive it again since it will be a // known message on the gossip layer - } else if state.compare_exchange(2, 3, Ordering::SeqCst, Ordering::SeqCst).unwrap() == - 2 + } else if state.compare_exchange(2, 3, Ordering::SeqCst, Ordering::SeqCst).unwrap() + == 2 { // we then receive a precommit from alice for block 15 // even though we casted a prevote for block 30 diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index df0b63348e94b..27b6229a5afd5 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -333,7 +333,7 @@ where if let Some(metrics) = &mut this.metrics { metrics.waiting_messages_dec(); } - return Poll::Ready(Some(Ok(ready))) + return Poll::Ready(Some(Ok(ready))); } if this.import_notifications.is_done() && this.incoming_messages.is_done() { @@ -366,9 +366,9 @@ impl BlockUntilImported for SignedMessage { if let Some(number) = status_check.block_number(target_hash)? 
{ if number != target_number { warn_authority_wrong_target(target_hash, msg.id); - return Ok(DiscardWaitOrReady::Discard) + return Ok(DiscardWaitOrReady::Discard); } else { - return Ok(DiscardWaitOrReady::Ready(msg)) + return Ok(DiscardWaitOrReady::Ready(msg)); } } @@ -459,7 +459,7 @@ impl BlockUntilImported for BlockGlobalMessage { // invalid global message: messages targeting wrong number // or at least different from other vote in same global // message. - return Ok(false) + return Ok(false); } Ok(true) @@ -473,7 +473,7 @@ impl BlockUntilImported for BlockGlobalMessage { for (target_number, target_hash) in precommit_targets { if !query_known(target_hash, target_number)? { - return Ok(DiscardWaitOrReady::Discard) + return Ok(DiscardWaitOrReady::Discard); } } }, @@ -493,7 +493,7 @@ impl BlockUntilImported for BlockGlobalMessage { for (target_number, target_hash) in targets { if !query_known(target_hash, target_number)? { - return Ok(DiscardWaitOrReady::Discard) + return Ok(DiscardWaitOrReady::Discard); } } }, @@ -511,7 +511,7 @@ impl BlockUntilImported for BlockGlobalMessage { if unknown_hashes.is_empty() { // none of the hashes in the global message were unknown. // we can just return the message directly. - return Ok(DiscardWaitOrReady::Ready(input)) + return Ok(DiscardWaitOrReady::Ready(input)); } let locked_global = Arc::new(Mutex::new(Some(input))); @@ -538,7 +538,7 @@ impl BlockUntilImported for BlockGlobalMessage { // Delete the inner message so it won't ever be forwarded. Future calls to // `wait_completed` on the same `inner` will ignore it. 
*self.inner.lock() = None; - return None + return None; } match Arc::try_unwrap(self.inner) { @@ -939,10 +939,10 @@ mod tests { let block_sync_requests = block_sync_requester.requests.lock(); // we request blocks targeted by the precommits that aren't imported - if block_sync_requests.contains(&(h2.hash(), *h2.number())) && - block_sync_requests.contains(&(h3.hash(), *h3.number())) + if block_sync_requests.contains(&(h2.hash(), *h2.number())) + && block_sync_requests.contains(&(h3.hash(), *h3.number())) { - return Poll::Ready(()) + return Poll::Ready(()); } // NOTE: nothing in this function is future-aware (i.e nothing gets registered to wake diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index fb7754fc0169a..993f87a259d1b 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -105,13 +105,13 @@ where use sp_arithmetic::traits::Saturating; if current_target.number().is_zero() { - return Box::pin(async { None }) + return Box::pin(async { None }); } // Constrain to the base number, if that's the minimal // vote that can be placed. 
if *base.number() + self.0 > *best_target.number() { - return Box::pin(std::future::ready(Some((base.hash(), *base.number())))) + return Box::pin(std::future::ready(Some((base.hash(), *base.number())))); } // find the target number restricted by this rule @@ -119,7 +119,7 @@ where // our current target is already lower than this rule would restrict if target_number >= *current_target.number() { - return Box::pin(async { None }) + return Box::pin(async { None }); } let current_target = current_target.clone(); @@ -161,7 +161,7 @@ where // our current target is already lower than this rule would restrict if target_number >= *current_target.number() { - return Box::pin(async { None }) + return Box::pin(async { None }); } // find the block at the given target height @@ -192,7 +192,7 @@ where } if *target_header.number() == target_number { - return Some((target_hash, target_number)) + return Some((target_hash, target_number)); } target_hash = *target_header.parent_hash(); @@ -239,8 +239,8 @@ where .await .filter(|(_, restricted_number)| { // NOTE: we can only restrict votes within the interval [base, target) - restricted_number >= base.number() && - restricted_number < restricted_target.number() + restricted_number >= base.number() + && restricted_number < restricted_target.number() }) .and_then(|(hash, _)| backend.header(BlockId::Hash(hash)).ok()) .and_then(std::convert::identity) diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index c9f762fc7d593..3be4cb60da81f 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -94,7 +94,7 @@ impl WarpSyncProof { .ok_or_else(|| Error::InvalidRequest("Missing start block".to_string()))?; if begin_number > blockchain.info().finalized_number { - return Err(Error::InvalidRequest("Start block is not finalized".to_string())) + return Err(Error::InvalidRequest("Start block is not finalized".to_string())); } let canon_hash = 
blockchain.hash(begin_number)?.expect( @@ -106,7 +106,7 @@ impl WarpSyncProof { if canon_hash != begin { return Err(Error::InvalidRequest( "Start block is not in the finalized chain".to_string(), - )) + )); } let mut proofs = Vec::new(); @@ -126,7 +126,7 @@ impl WarpSyncProof { // if it doesn't contain a signal for standard change then the set must have changed // through a forced changed, in which case we stop collecting proofs as the chain of // trust in authority handoffs was broken. - break + break; } let justification = blockchain @@ -148,7 +148,7 @@ impl WarpSyncProof { // room for rest of the data (the size of the `Vec` and the boolean). if proofs_encoded_len + proof_size >= MAX_WARP_SYNC_PROOF_SIZE - 50 { proof_limit_reached = true; - break + break; } proofs_encoded_len += proof_size; @@ -217,7 +217,7 @@ impl WarpSyncProof { if proof.justification.target().1 != hash { return Err(Error::InvalidProof( "Mismatch between header and justification".to_owned(), - )) + )); } if let Some(scheduled_change) = find_scheduled_change::(&proof.header) { @@ -228,7 +228,7 @@ impl WarpSyncProof { // authority set change. 
return Err(Error::InvalidProof( "Header is missing authority set change digest".to_string(), - )) + )); } } } diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 3d585a9985134..d5526706e17ed 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -119,10 +119,12 @@ impl InformantDisplay { ), ), (SyncState::Idle, _, _) => ("💤", "Idle".into(), "".into()), - (SyncState::Downloading { target }, _, _) => - ("⚙️ ", format!("Syncing{}", speed), format!(", target=#{target}")), - (SyncState::Importing { target }, _, _) => - ("⚙️ ", format!("Preparing{}", speed), format!(", target=#{target}")), + (SyncState::Downloading { target }, _, _) => { + ("⚙️ ", format!("Syncing{}", speed), format!(", target=#{target}")) + }, + (SyncState::Importing { target }, _, _) => { + ("⚙️ ", format!("Preparing{}", speed), format!(", target=#{target}")) + }, }; if self.format.enable_color { @@ -186,8 +188,8 @@ fn speed( let speed = diff .saturating_mul(10_000) .checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) / - 10.0; + .map_or(0.0, |s| s as f64) + / 10.0; format!(" {:4.1} bps", speed) } else { // If the number of blocks can't be converted to a regular integer, then we need a more @@ -211,17 +213,17 @@ impl fmt::Display for TransferRateFormat { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // Special case 0. if self.0 == 0 { - return write!(f, "0") + return write!(f, "0"); } // Under 0.1 kiB, display plain bytes. if self.0 < 100 { - return write!(f, "{} B/s", self.0) + return write!(f, "{} B/s", self.0); } // Under 1.0 MiB/sec, display the value in kiB/sec. 
if self.0 < 1024 * 1024 { - return write!(f, "{:.1}kiB/s", self.0 as f64 / 1024.0) + return write!(f, "{:.1}kiB/s", self.0 as f64 / 1024.0); } write!(f, "{:.1}MiB/s", self.0 as f64 / (1024.0 * 1024.0)) diff --git a/client/keystore/src/lib.rs b/client/keystore/src/lib.rs index cf94a16f08d86..01657099e90de 100644 --- a/client/keystore/src/lib.rs +++ b/client/keystore/src/lib.rs @@ -63,8 +63,9 @@ impl From for TraitError { fn from(error: Error) -> Self { match error { Error::KeyNotSupported(id) => TraitError::KeyNotSupported(id), - Error::InvalidSeed | Error::InvalidPhrase | Error::PublicKeyMismatch => - TraitError::ValidationError(error.to_string()), + Error::InvalidSeed | Error::InvalidPhrase | Error::PublicKeyMismatch => { + TraitError::ValidationError(error.to_string()) + }, Error::Unavailable => TraitError::Unavailable, Error::Io(e) => TraitError::Other(e.to_string()), Error::Json(e) => TraitError::Other(e.to_string()), diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index 54ff6a5b164a8..31ec1d3f000db 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -245,8 +245,9 @@ impl SyncCryptoStore for LocalKeystore { seed: Option<&str>, ) -> std::result::Result { let pair = match seed { - Some(seed) => - self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), + Some(seed) => { + self.0.write().insert_ephemeral_from_seed_by_type::(seed, id) + }, None => self.0.write().generate_by_type::(id), } .map_err(|e| -> TraitError { e.into() })?; @@ -272,8 +273,9 @@ impl SyncCryptoStore for LocalKeystore { seed: Option<&str>, ) -> std::result::Result { let pair = match seed { - Some(seed) => - self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), + Some(seed) => { + self.0.write().insert_ephemeral_from_seed_by_type::(seed, id) + }, None => self.0.write().generate_by_type::(id), } .map_err(|e| -> TraitError { e.into() })?; @@ -299,8 +301,9 @@ impl SyncCryptoStore for LocalKeystore { seed: Option<&str>, ) -> 
std::result::Result { let pair = match seed { - Some(seed) => - self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), + Some(seed) => { + self.0.write().insert_ephemeral_from_seed_by_type::(seed, id) + }, None => self.0.write().generate_by_type::(id), } .map_err(|e| -> TraitError { e.into() })?; @@ -468,13 +471,13 @@ impl KeystoreInner { /// Get the key phrase for a given public key and key type. fn key_phrase_by_type(&self, public: &[u8], key_type: KeyTypeId) -> Result> { if let Some(phrase) = self.get_additional_pair(public, key_type) { - return Ok(Some(phrase.clone())) + return Ok(Some(phrase.clone())); } let path = if let Some(path) = self.key_file_path(public, key_type) { path } else { - return Ok(None) + return Ok(None); }; if path.exists() { @@ -495,7 +498,7 @@ impl KeystoreInner { let phrase = if let Some(p) = self.key_phrase_by_type(public.as_slice(), key_type)? { p } else { - return Ok(None) + return Ok(None); }; let pair = Pair::from_string(&phrase, self.password()).map_err(|_| Error::InvalidPhrase)?; @@ -537,7 +540,7 @@ impl KeystoreInner { match array_bytes::hex2bytes(name) { Ok(ref hex) if hex.len() > 4 => { if hex[0..4] != id.0 { - continue + continue; } let public = hex[4..].to_vec(); public_keys.push(public); diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 5563b3be35e8d..eb3ef66b700ec 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -188,13 +188,13 @@ impl Future for GossipEngine { }, Event::NotificationStreamOpened { remote, protocol, role, .. } => { if protocol != this.protocol { - continue + continue; } this.state_machine.new_peer(&mut *this.network, remote, role); }, Event::NotificationStreamClosed { remote, protocol } => { if protocol != this.protocol { - continue + continue; } this.state_machine.peer_disconnected(&mut *this.network, remote); }, @@ -223,7 +223,7 @@ impl Future for GossipEngine { // The network event stream closed. 
Do the same for [`GossipValidator`]. Poll::Ready(None) => { self.is_terminated = true; - return Poll::Ready(()) + return Poll::Ready(()); }, Poll::Pending => break, } @@ -233,7 +233,7 @@ impl Future for GossipEngine { Some(n) => n, None => { this.forwarding_state = ForwardingState::Idle; - continue + continue; }, }; @@ -251,7 +251,7 @@ impl Future for GossipEngine { Poll::Pending => { // Push back onto queue for later. to_forward.push_front((topic, notification)); - break 'outer + break 'outer; }, } } @@ -261,7 +261,7 @@ impl Future for GossipEngine { if sinks.is_empty() { this.message_sinks.remove(&topic); - continue + continue; } trace!( @@ -731,7 +731,7 @@ mod tests { } if !progress { - break + break; } } Poll::Ready(()) diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 001f2c6136a00..266004763d935 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -114,12 +114,13 @@ where for (id, ref mut peer) in peers.iter_mut() { for (message_hash, topic, message) in messages.clone() { let intent = match intent { - MessageIntent::Broadcast { .. } => + MessageIntent::Broadcast { .. 
} => { if peer.known_messages.contains(message_hash) { - continue + continue; } else { MessageIntent::Broadcast - }, + } + }, MessageIntent::PeriodicRebroadcast => { if peer.known_messages.contains(message_hash) { MessageIntent::PeriodicRebroadcast @@ -133,7 +134,7 @@ where }; if !message_allowed(id, intent, topic, message) { - continue + continue; } peer.known_messages.insert(*message_hash); @@ -355,7 +356,7 @@ impl ConsensusGossip { "Ignored already known message", ); network.report_peer(who, rep::DUPLICATE_GOSSIP); - continue + continue; } // validate the message @@ -375,7 +376,7 @@ impl ConsensusGossip { protocol = %self.protocol, "Discard message from peer", ); - continue + continue; }, }; @@ -388,7 +389,7 @@ impl ConsensusGossip { protocol = %self.protocol, "Got message from unregistered peer", ); - continue + continue; }, }; @@ -421,11 +422,11 @@ impl ConsensusGossip { if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; if !force && peer.known_messages.contains(&entry.message_hash) { - continue + continue; } if !message_allowed(who, intent, &entry.topic, &entry.message) { - continue + continue; } peer.known_messages.insert(entry.message_hash); diff --git a/client/network/bitswap/src/lib.rs b/client/network/bitswap/src/lib.rs index 62a18b18c839d..d92f4c52cdcb2 100644 --- a/client/network/bitswap/src/lib.rs +++ b/client/network/bitswap/src/lib.rs @@ -127,8 +127,9 @@ impl BitswapRequestHandler { }; match pending_response.send(response) { - Ok(()) => - trace!(target: LOG_TARGET, "Handled bitswap request from {peer}.",), + Ok(()) => { + trace!(target: LOG_TARGET, "Handled bitswap request from {peer}.",) + }, Err(_) => debug!( target: LOG_TARGET, "Failed to handle light client request from {peer}: {}", @@ -175,13 +176,13 @@ impl BitswapRequestHandler { Some(wantlist) => wantlist, None => { debug!(target: LOG_TARGET, "Unexpected bitswap message from {}", peer); - return Err(BitswapError::InvalidWantList) + return 
Err(BitswapError::InvalidWantList); }, }; if wantlist.entries.len() > MAX_WANTED_BLOCKS { trace!(target: LOG_TARGET, "Ignored request: too many entries"); - return Err(BitswapError::TooManyEntries) + return Err(BitswapError::TooManyEntries); } for entry in wantlist.entries { @@ -189,16 +190,16 @@ impl BitswapRequestHandler { Ok(cid) => cid, Err(e) => { trace!(target: LOG_TARGET, "Bad CID {:?}: {:?}", entry.block, e); - continue + continue; }, }; - if cid.version() != cid::Version::V1 || - cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) || - cid.hash().size() != 32 + if cid.version() != cid::Version::V1 + || cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) + || cid.hash().size() != 32 { debug!(target: LOG_TARGET, "Ignoring unsupported CID {}: {}", peer, cid); - continue + continue; } let mut hash = B::Hash::default(); diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index 96c7c11ec2696..d27e1b4e6c0f3 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -71,8 +71,9 @@ pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { /// Splits a Multiaddress into a Multiaddress and PeerId. pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> { let who = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => - PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?, + Some(multiaddr::Protocol::P2p(key)) => { + PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)? 
+ }, _ => return Err(ParseErr::PeerIdMissing), }; diff --git a/client/network/common/src/service/signature.rs b/client/network/common/src/service/signature.rs index 602ef3d82979a..95df1e9b73ffe 100644 --- a/client/network/common/src/service/signature.rs +++ b/client/network/common/src/service/signature.rs @@ -47,7 +47,7 @@ impl Signature { /// Verify whether the signature was made for the given message by the entity that controls the /// given `PeerId`. pub fn verify(&self, message: impl AsRef<[u8]>, peer_id: &PeerId) -> bool { - *peer_id == self.public_key.to_peer_id() && - self.public_key.verify(message.as_ref(), &self.bytes) + *peer_id == self.public_key.to_peer_id() + && self.public_key.verify(message.as_ref(), &self.bytes) } } diff --git a/client/network/common/src/utils.rs b/client/network/common/src/utils.rs index d0e61a0d0475d..dd7f59081f630 100644 --- a/client/network/common/src/utils.rs +++ b/client/network/common/src/utils.rs @@ -51,7 +51,7 @@ impl LruHashSet { if self.set.len() == usize::from(self.limit) { self.set.pop_front(); // remove oldest entry } - return true + return true; } false } diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index 77904c7256295..84e0fcc7b3c5c 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -147,14 +147,18 @@ where let request = schema::v1::light::Request::decode(&payload[..])?; let response = match &request.request { - Some(schema::v1::light::request::Request::RemoteCallRequest(r)) => - self.on_remote_call_request(&peer, r)?, - Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => - self.on_remote_read_request(&peer, r)?, - Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => - self.on_remote_read_child_request(&peer, r)?, - None => - return Err(HandleRequestError::BadRequest("Remote request without request data.")), + 
Some(schema::v1::light::request::Request::RemoteCallRequest(r)) => { + self.on_remote_call_request(&peer, r)? + }, + Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => { + self.on_remote_read_request(&peer, r)? + }, + Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => { + self.on_remote_read_child_request(&peer, r)? + }, + None => { + return Err(HandleRequestError::BadRequest("Remote request without request data.")) + }, }; let mut data = Vec::new(); @@ -199,7 +203,7 @@ where ) -> Result { if request.keys.is_empty() { debug!("Invalid remote read request sent by {}.", peer); - return Err(HandleRequestError::BadRequest("Remote read request without keys.")) + return Err(HandleRequestError::BadRequest("Remote read request without keys.")); } trace!( @@ -239,7 +243,7 @@ where ) -> Result { if request.keys.is_empty() { debug!("Invalid remote child read request sent by {}.", peer); - return Err(HandleRequestError::BadRequest("Remove read child request without keys.")) + return Err(HandleRequestError::BadRequest("Remove read child request without keys.")); } trace!( diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 2e646956e9d8c..74356d9749ccb 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -352,16 +352,21 @@ fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { impl From> for BehaviourOut { fn from(event: CustomMessageOutcome) -> Self { match event { - CustomMessageOutcome::BlockImport(origin, blocks) => - BehaviourOut::BlockImport(origin, blocks), - CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => - BehaviourOut::JustificationImport(origin, hash, nb, justification), - CustomMessageOutcome::BlockRequest { target, request, pending_response } => - BehaviourOut::BlockRequest { target, request, pending_response }, - CustomMessageOutcome::StateRequest { target, request, pending_response } => - BehaviourOut::StateRequest { 
target, request, pending_response }, - CustomMessageOutcome::WarpSyncRequest { target, request, pending_response } => - BehaviourOut::WarpSyncRequest { target, request, pending_response }, + CustomMessageOutcome::BlockImport(origin, blocks) => { + BehaviourOut::BlockImport(origin, blocks) + }, + CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => { + BehaviourOut::JustificationImport(origin, hash, nb, justification) + }, + CustomMessageOutcome::BlockRequest { target, request, pending_response } => { + BehaviourOut::BlockRequest { target, request, pending_response } + }, + CustomMessageOutcome::StateRequest { target, request, pending_response } => { + BehaviourOut::StateRequest { target, request, pending_response } + }, + CustomMessageOutcome::WarpSyncRequest { target, request, pending_response } => { + BehaviourOut::WarpSyncRequest { target, request, pending_response } + }, CustomMessageOutcome::NotificationStreamOpened { remote, protocol, @@ -380,14 +385,17 @@ impl From> for BehaviourOut { protocol, notifications_sink, } => BehaviourOut::NotificationStreamReplaced { remote, protocol, notifications_sink }, - CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => - BehaviourOut::NotificationStreamClosed { remote, protocol }, - CustomMessageOutcome::NotificationsReceived { remote, messages } => - BehaviourOut::NotificationsReceived { remote, messages }, + CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => { + BehaviourOut::NotificationStreamClosed { remote, protocol } + }, + CustomMessageOutcome::NotificationsReceived { remote, messages } => { + BehaviourOut::NotificationsReceived { remote, messages } + }, CustomMessageOutcome::PeerNewBest(_peer_id, _number) => BehaviourOut::None, CustomMessageOutcome::SyncConnected(peer_id) => BehaviourOut::SyncConnected(peer_id), - CustomMessageOutcome::SyncDisconnected(peer_id) => - BehaviourOut::SyncDisconnected(peer_id), + 
CustomMessageOutcome::SyncDisconnected(peer_id) => { + BehaviourOut::SyncDisconnected(peer_id) + }, CustomMessageOutcome::None => BehaviourOut::None, } } @@ -396,12 +404,15 @@ impl From> for BehaviourOut { impl From for BehaviourOut { fn from(event: request_responses::Event) -> Self { match event { - request_responses::Event::InboundRequest { peer, protocol, result } => - BehaviourOut::InboundRequest { peer, protocol, result }, - request_responses::Event::RequestFinished { peer, protocol, duration, result } => - BehaviourOut::RequestFinished { peer, protocol, duration, result }, - request_responses::Event::ReputationChanges { peer, changes } => - BehaviourOut::ReputationChanges { peer, changes }, + request_responses::Event::InboundRequest { peer, protocol, result } => { + BehaviourOut::InboundRequest { peer, protocol, result } + }, + request_responses::Event::RequestFinished { peer, protocol, duration, result } => { + BehaviourOut::RequestFinished { peer, protocol, duration, result } + }, + request_responses::Event::ReputationChanges { peer, changes } => { + BehaviourOut::ReputationChanges { peer, changes } + }, } } } @@ -424,14 +435,18 @@ impl From for BehaviourOut { BehaviourOut::None }, DiscoveryOut::Discovered(peer_id) => BehaviourOut::Discovered(peer_id), - DiscoveryOut::ValueFound(results, duration) => - BehaviourOut::Dht(DhtEvent::ValueFound(results), duration), - DiscoveryOut::ValueNotFound(key, duration) => - BehaviourOut::Dht(DhtEvent::ValueNotFound(key), duration), - DiscoveryOut::ValuePut(key, duration) => - BehaviourOut::Dht(DhtEvent::ValuePut(key), duration), - DiscoveryOut::ValuePutFailed(key, duration) => - BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration), + DiscoveryOut::ValueFound(results, duration) => { + BehaviourOut::Dht(DhtEvent::ValueFound(results), duration) + }, + DiscoveryOut::ValueNotFound(key, duration) => { + BehaviourOut::Dht(DhtEvent::ValueNotFound(key), duration) + }, + DiscoveryOut::ValuePut(key, duration) => { + 
BehaviourOut::Dht(DhtEvent::ValuePut(key), duration) + }, + DiscoveryOut::ValuePutFailed(key, duration) => { + BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration) + }, DiscoveryOut::RandomKademliaStarted => BehaviourOut::RandomKademliaStarted, } } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 00fc78061293d..db949bf797513 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -339,7 +339,7 @@ impl DiscoveryBehaviour { target: "sub-libp2p", "Ignoring self-reported non-global address {} from {}.", addr, peer_id ); - return + return; } if let Some(matching_protocol) = supported_protocols @@ -422,8 +422,9 @@ impl DiscoveryBehaviour { let ip = match addr.iter().next() { Some(Protocol::Ip4(ip)) => IpNetwork::from(ip), Some(Protocol::Ip6(ip)) => IpNetwork::from(ip), - Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) => - return true, + Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) => { + return true + }, _ => return false, }; ip.is_global() @@ -646,7 +647,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { ) -> Poll> { // Immediately process the content of `discovered`. if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); } // Poll the stream that fires when we need to start a random Kademlia query. @@ -680,7 +681,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { if actually_started { let ev = DiscoveryOut::RandomKademliaStarted; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); } } } @@ -691,18 +692,18 @@ impl NetworkBehaviour for DiscoveryBehaviour { NetworkBehaviourAction::GenerateEvent(ev) => match ev { KademliaEvent::RoutingUpdated { peer, .. 
} => { let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); }, KademliaEvent::UnroutablePeer { peer, .. } => { let ev = DiscoveryOut::UnroutablePeer(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); }, KademliaEvent::RoutablePeer { peer, .. } => { let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); }, - KademliaEvent::PendingRoutablePeer { .. } | - KademliaEvent::InboundRequest { .. } => { + KademliaEvent::PendingRoutablePeer { .. } + | KademliaEvent::InboundRequest { .. } => { // We are not interested in this event at the moment. }, KademliaEvent::OutboundQueryCompleted { @@ -771,7 +772,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { ) }, }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); }, KademliaEvent::OutboundQueryCompleted { result: QueryResult::PutRecord(res), @@ -779,8 +780,9 @@ impl NetworkBehaviour for DiscoveryBehaviour { .. 
} => { let ev = match res { - Ok(ok) => - DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_default()), + Ok(ok) => { + DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_default()) + }, Err(e) => { debug!( target: "sub-libp2p", @@ -793,7 +795,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { ) }, }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); }, KademliaEvent::OutboundQueryCompleted { result: QueryResult::RepublishRecord(res), @@ -815,24 +817,28 @@ impl NetworkBehaviour for DiscoveryBehaviour { warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) }, }, - NetworkBehaviourAction::Dial { opts, handler } => - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }), - NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => + NetworkBehaviourAction::Dial { opts, handler } => { + return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) + }, + NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event, - }), - NetworkBehaviourAction::ReportObservedAddr { address, score } => + }) + }, + NetworkBehaviourAction::ReportObservedAddr { address, score } => { return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score, - }), - NetworkBehaviourAction::CloseConnection { peer_id, connection } => + }) + }, + NetworkBehaviourAction::CloseConnection { peer_id, connection } => { return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection, - }), + }) + }, } } @@ -843,13 +849,13 @@ impl NetworkBehaviour for DiscoveryBehaviour { NetworkBehaviourAction::GenerateEvent(event) => match event { MdnsEvent::Discovered(list) => { if self.num_connections >= self.discovery_only_if_under_num { - continue + continue; } self.pending_events .extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); if let 
Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); } }, MdnsEvent::Expired(_) => {}, @@ -858,16 +864,18 @@ impl NetworkBehaviour for DiscoveryBehaviour { unreachable!("mDNS never dials!"); }, NetworkBehaviourAction::NotifyHandler { event, .. } => match event {}, /* `event` is an enum with no variant */ - NetworkBehaviourAction::ReportObservedAddr { address, score } => + NetworkBehaviourAction::ReportObservedAddr { address, score } => { return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score, - }), - NetworkBehaviourAction::CloseConnection { peer_id, connection } => + }) + }, + NetworkBehaviourAction::CloseConnection { peer_id, connection } => { return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection, - }), + }) + }, } } } @@ -983,8 +991,8 @@ mod tests { match e { SwarmEvent::Behaviour(behavior) => { match behavior { - DiscoveryOut::UnroutablePeer(other) | - DiscoveryOut::Discovered(other) => { + DiscoveryOut::UnroutablePeer(other) + | DiscoveryOut::Discovered(other) => { // Call `add_self_reported_address` to simulate identify // happening. 
let addr = swarms @@ -1024,12 +1032,12 @@ mod tests { // ignore non Behaviour events _ => {}, } - continue 'polling + continue 'polling; }, _ => {}, } } - break + break; } if to_discover.iter().all(|l| l.is_empty()) { diff --git a/client/network/src/network_state.rs b/client/network/src/network_state.rs index 57073c57afa69..b143af776b1d6 100644 --- a/client/network/src/network_state.rs +++ b/client/network/src/network_state.rs @@ -106,10 +106,12 @@ pub enum Endpoint { impl From for PeerEndpoint { fn from(endpoint: ConnectedPoint) -> Self { match endpoint { - ConnectedPoint::Dialer { address, role_override } => - Self::Dialing(address, role_override.into()), - ConnectedPoint::Listener { local_addr, send_back_addr } => - Self::Listening { local_addr, send_back_addr }, + ConnectedPoint::Dialer { address, role_override } => { + Self::Dialing(address, role_override.into()) + }, + ConnectedPoint::Listener { local_addr, send_back_addr } => { + Self::Listening { local_addr, send_back_addr } + }, } } } diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index e04d006f50501..b37e9ce10fac4 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -370,24 +370,27 @@ impl NetworkBehaviour for PeerInfoBehaviour { Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) => { let handler = IntoConnectionHandler::select(handler, self.identify.new_handler()); - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) + return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }); }, - Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => + Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event: EitherOutput::First(event), - }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => + }) + }, + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr 
{ address, score }) => { return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score, - }), - Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => + }) + }, + Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => { return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection, - }), + }) + }, } } @@ -398,7 +401,7 @@ impl NetworkBehaviour for PeerInfoBehaviour { IdentifyEvent::Received { peer_id, info, .. } => { self.handle_identify_report(&peer_id, &info); let event = PeerInfoEvent::Identified { peer_id, info }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); }, IdentifyEvent::Error { peer_id, error } => { debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error) @@ -408,24 +411,27 @@ impl NetworkBehaviour for PeerInfoBehaviour { }, Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) => { let handler = IntoConnectionHandler::select(self.ping.new_handler(), handler); - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) + return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }); }, - Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => + Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event: EitherOutput::Second(event), - }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => + }) + }, + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => { return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score, - }), - Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => + }) + }, + Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => { return 
Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection, - }), + }) + }, } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 63d060f423773..f205a1c8cfb92 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -310,8 +310,8 @@ where out_peers: network_config.default_peers_set.out_peers, bootnodes, reserved_nodes: default_sets_reserved.clone(), - reserved_only: network_config.default_peers_set.non_reserved_mode == - NonReservedPeerMode::Deny, + reserved_only: network_config.default_peers_set.non_reserved_mode + == NonReservedPeerMode::Deny, }); for set_cfg in &network_config.extra_sets { @@ -358,8 +358,8 @@ where }; let cache_capacity = NonZeroUsize::new( - (network_config.default_peers_set.in_peers as usize + - network_config.default_peers_set.out_peers as usize) + (network_config.default_peers_set.in_peers as usize + + network_config.default_peers_set.out_peers as usize) .max(1), ) .expect("cache capacity is not zero"); @@ -378,8 +378,8 @@ where default_peers_set_no_slot_connected_peers: HashSet::new(), default_peers_set_num_full: network_config.default_peers_set_num_full as usize, default_peers_set_num_light: { - let total = network_config.default_peers_set.out_peers + - network_config.default_peers_set.in_peers; + let total = network_config.default_peers_set.out_peers + + network_config.default_peers_set.in_peers; total.saturating_sub(network_config.default_peers_set_num_full) as usize }, peerset_handle: peerset_handle.clone(), @@ -534,7 +534,7 @@ where Err(err) => { debug!(target: "sync", "Failed to decode block response from {}: {}", peer_id, err); self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); - return CustomMessageOutcome::None + return CustomMessageOutcome::None; }, }; @@ -561,8 +561,9 @@ where if request.fields == BlockAttributes::JUSTIFICATION { match self.chain_sync.on_block_justification(peer_id, block_response) { Ok(OnBlockJustification::Nothing) => 
CustomMessageOutcome::None, - Ok(OnBlockJustification::Import { peer, hash, number, justifications }) => - CustomMessageOutcome::JustificationImport(peer, hash, number, justifications), + Ok(OnBlockJustification::Import { peer, hash, number, justifications }) => { + CustomMessageOutcome::JustificationImport(peer, hash, number, justifications) + }, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); @@ -571,10 +572,12 @@ where } } else { match self.chain_sync.on_block_data(&peer_id, Some(request), block_response) { - Ok(OnBlockData::Import(origin, blocks)) => - CustomMessageOutcome::BlockImport(origin, blocks), - Ok(OnBlockData::Request(peer, req)) => - prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, peer, req), + Ok(OnBlockData::Import(origin, blocks)) => { + CustomMessageOutcome::BlockImport(origin, blocks) + }, + Ok(OnBlockData::Request(peer, req)) => { + prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, peer, req) + }, Ok(OnBlockData::Continue) => CustomMessageOutcome::None, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); @@ -593,8 +596,9 @@ where response: OpaqueStateResponse, ) -> CustomMessageOutcome { match self.chain_sync.on_state_data(&peer_id, response) { - Ok(OnStateData::Import(origin, block)) => - CustomMessageOutcome::BlockImport(origin, vec![block]), + Ok(OnStateData::Import(origin, block)) => { + CustomMessageOutcome::BlockImport(origin, vec![block]) + }, Ok(OnStateData::Continue) => CustomMessageOutcome::None, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); @@ -643,7 +647,7 @@ where if self.peers.contains_key(&who) { error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", who); debug_assert!(false); - return Err(()) + return Err(()); } if status.genesis_hash != self.genesis_hash { @@ -666,7 +670,7 @@ where ); } - return 
Err(()) + return Err(()); } if self.roles.is_light() { @@ -675,7 +679,7 @@ where debug!(target: "sync", "Peer {} is unable to serve light requests", who); self.peerset_handle.report_peer(who, rep::BAD_ROLE); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()) + return Err(()); } // we don't interested in peers that are far behind us @@ -688,31 +692,31 @@ where debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who); self.peerset_handle.report_peer(who, rep::PEER_BEHIND_US_LIGHT); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()) + return Err(()); } } let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&who); let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 }; - if status.roles.is_full() && - self.chain_sync.num_peers() >= - self.default_peers_set_num_full + - self.default_peers_set_no_slot_connected_peers.len() + - this_peer_reserved_slot + if status.roles.is_full() + && self.chain_sync.num_peers() + >= self.default_peers_set_num_full + + self.default_peers_set_no_slot_connected_peers.len() + + this_peer_reserved_slot { debug!(target: "sync", "Too many full nodes, rejecting {}", who); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()) + return Err(()); } - if status.roles.is_light() && - (self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light + if status.roles.is_light() + && (self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light { // Make sure that not all slots are occupied by light clients. 
debug!(target: "sync", "Too many light nodes, rejecting {}", who); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()) + return Err(()); } let peer = Peer { @@ -733,7 +737,7 @@ where Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); - return Err(()) + return Err(()); }, } } else { @@ -770,17 +774,17 @@ where Ok(Some(header)) => header, Ok(None) => { warn!("Trying to announce unknown block: {}", hash); - return + return; }, Err(e) => { warn!("Error reading block header {}: {}", hash, e); - return + return; }, }; // don't announce genesis block since it will be ignored if header.number().is_zero() { - return + return; } let is_best = self.chain.info().best_hash == hash; @@ -827,7 +831,7 @@ where None => { log::error!(target: "sync", "Received block announce from disconnected peer {}", who); debug_assert!(false); - return + return; }, }; @@ -866,9 +870,9 @@ where // AND // 2) parent block is already imported and not pruned. 
if is_best { - return CustomMessageOutcome::PeerNewBest(who, *announce.header.number()) + return CustomMessageOutcome::PeerNewBest(who, *announce.header.number()); } else { - return CustomMessageOutcome::None + return CustomMessageOutcome::None; } }, PollBlockAnnounceValidation::ImportHeader { announce, is_best, who } => { @@ -888,7 +892,7 @@ where } self.report_peer(who, rep::BAD_BLOCK_ANNOUNCEMENT); - return CustomMessageOutcome::None + return CustomMessageOutcome::None; }, }; @@ -919,10 +923,12 @@ where } match blocks_to_import { - Ok(OnBlockData::Import(origin, blocks)) => - CustomMessageOutcome::BlockImport(origin, blocks), - Ok(OnBlockData::Request(peer, req)) => - prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, peer, req), + Ok(OnBlockData::Import(origin, blocks)) => { + CustomMessageOutcome::BlockImport(origin, blocks) + }, + Ok(OnBlockData::Request(peer, req)) => { + prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, peer, req) + }, Ok(OnBlockData::Continue) => CustomMessageOutcome::None, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); @@ -1302,7 +1308,7 @@ where params: &mut impl PollParameters, ) -> Poll> { if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); } // Check for finished outgoing requests. 
@@ -1329,7 +1335,7 @@ where self.peerset_handle.report_peer(*id, rep::BAD_MESSAGE); self.behaviour .disconnect_peer(id, HARDCODED_PEERSETS_SYNC); - continue + continue; }, }; @@ -1349,7 +1355,7 @@ where self.peerset_handle.report_peer(*id, rep::BAD_MESSAGE); self.behaviour .disconnect_peer(id, HARDCODED_PEERSETS_SYNC); - continue + continue; }, }; @@ -1380,8 +1386,8 @@ where self.peerset_handle.report_peer(*id, rep::REFUSED); self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); }, - RequestFailure::Network(OutboundFailure::ConnectionClosed) | - RequestFailure::NotConnected => { + RequestFailure::Network(OutboundFailure::ConnectionClosed) + | RequestFailure::NotConnected => { self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); }, RequestFailure::UnknownProtocol => { @@ -1465,24 +1471,28 @@ where } if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); } let event = match self.behaviour.poll(cx, params) { Poll::Pending => return Poll::Pending, Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) => ev, - Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) => - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }), - Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => + Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) => { + return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) + }, + Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event, - }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), - Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => - return 
Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }), + }) + }, + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => { + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) + }, + Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => { + return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) + }, }; let outcome = match event { @@ -1584,9 +1594,9 @@ where } } }, - NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => - if set_id == HARDCODED_PEERSETS_SYNC || - self.bad_handshake_substreams.contains(&(peer_id, set_id)) + NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => { + if set_id == HARDCODED_PEERSETS_SYNC + || self.bad_handshake_substreams.contains(&(peer_id, set_id)) { CustomMessageOutcome::None } else { @@ -1595,7 +1605,8 @@ where protocol: self.notification_protocols[usize::from(set_id)].clone(), notifications_sink, } - }, + } + }, NotificationsOut::CustomProtocolClosed { peer_id, set_id } => { // Set number 0 is hardcoded the default set of peers we sync from. 
if set_id == HARDCODED_PEERSETS_SYNC { @@ -1647,8 +1658,9 @@ where ); CustomMessageOutcome::None }, - _ if self.bad_handshake_substreams.contains(&(peer_id, set_id)) => - CustomMessageOutcome::None, + _ if self.bad_handshake_substreams.contains(&(peer_id, set_id)) => { + CustomMessageOutcome::None + }, _ => { let protocol_name = self.notification_protocols[usize::from(set_id)].clone(); CustomMessageOutcome::NotificationsReceived { @@ -1660,11 +1672,11 @@ where }; if !matches!(outcome, CustomMessageOutcome::::None) { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)); } if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); } // This block can only be reached if an event was pulled from the behaviour and that diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index ef652387d2c7d..5df92d010a36d 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -166,12 +166,13 @@ pub mod generic { let compact = CompactStatus::decode(value)?; let chain_status = match >::decode(value) { Ok(v) => v, - Err(e) => + Err(e) => { if compact.version <= LAST_CHAIN_STATUS_VERSION { - return Err(e) + return Err(e); } else { Vec::new() - }, + } + }, }; let CompactStatus { diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index 04f6fe445ac63..9e21d15b82bca 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -427,7 +427,7 @@ impl Notifications { let mut entry = if let Entry::Occupied(entry) = self.peers.entry((*peer_id, set_id)) { entry } else { - return + return; }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { @@ 
-512,7 +512,7 @@ impl Notifications { target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming for incoming peer" ); - return + return; }; inc.alive = false; @@ -577,7 +577,7 @@ impl Notifications { "Tried to sent notification to {:?} without an open channel.", target, ); - return + return; }, Some(sink) => sink, }; @@ -621,7 +621,7 @@ impl Notifications { handler, }); entry.insert(PeerState::Requested); - return + return; }, }; @@ -824,7 +824,7 @@ impl Notifications { Entry::Vacant(entry) => { trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.", entry.key().0, set_id); - return + return; }, }; @@ -930,7 +930,7 @@ impl Notifications { self.incoming.remove(pos) } else { error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); - return + return; }; if !incoming.alive { @@ -945,14 +945,14 @@ impl Notifications { self.peerset.dropped(incoming.set_id, incoming.peer_id, DropReason::Unknown); }, } - return + return; } let state = match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) { Some(s) => s, None => { debug_assert!(false); - return + return; }, }; @@ -999,20 +999,20 @@ impl Notifications { self.incoming.remove(pos) } else { error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index); - return + return; }; if !incoming.alive { trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Obsolete incoming, \ ignoring", index, incoming.peer_id, incoming.set_id); - return + return; } let state = match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) { Some(s) => s, None => { debug_assert!(false); - return + return; }, }; @@ -1107,10 +1107,10 @@ impl NetworkBehaviour for Notifications { // In all other states, add this new connection to the list of closed inactive // connections. - PeerState::Incoming { connections, .. } | - PeerState::Disabled { connections, .. } | - PeerState::DisabledPendingEnable { connections, .. } | - PeerState::Enabled { connections, .. 
} => { + PeerState::Incoming { connections, .. } + | PeerState::Disabled { connections, .. } + | PeerState::DisabledPendingEnable { connections, .. } + | PeerState::Enabled { connections, .. } => { trace!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}, {:?}, {:?}): Secondary connection. Leaving closed.", peer_id, set_id, endpoint, *conn); @@ -1134,7 +1134,7 @@ impl NetworkBehaviour for Notifications { } else { error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler"); debug_assert!(false); - return + return; }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { @@ -1367,9 +1367,9 @@ impl NetworkBehaviour for Notifications { } }, - PeerState::Requested | - PeerState::PendingRequest { .. } | - PeerState::Backoff { .. } => { + PeerState::Requested + | PeerState::PendingRequest { .. } + | PeerState::Backoff { .. } => { // This is a serious bug either in this state machine or in libp2p. error!(target: "sub-libp2p", "`inject_connection_closed` called for unknown peer {}", @@ -1416,7 +1416,9 @@ impl NetworkBehaviour for Notifications { let ban_duration = match st { PeerState::PendingRequest { timer_deadline, .. } if timer_deadline > now => - cmp::max(timer_deadline - now, Duration::from_secs(5)), + { + cmp::max(timer_deadline - now, Duration::from_secs(5)) + }, _ => Duration::from_secs(5), }; @@ -1440,10 +1442,10 @@ impl NetworkBehaviour for Notifications { // We can still get dial failures even if we are already connected to the // peer, as an extra diagnostic for an earlier attempt. - st @ PeerState::Disabled { .. } | - st @ PeerState::Enabled { .. } | - st @ PeerState::DisabledPendingEnable { .. } | - st @ PeerState::Incoming { .. } => { + st @ PeerState::Disabled { .. } + | st @ PeerState::Enabled { .. } + | st @ PeerState::DisabledPendingEnable { .. } + | st @ PeerState::Incoming { .. 
} => { *entry.into_mut() = st; }, @@ -1474,7 +1476,7 @@ impl NetworkBehaviour for Notifications { "OpenDesiredByRemote: State mismatch in the custom protos handler" ); debug_assert!(false); - return + return; }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { @@ -1536,8 +1538,8 @@ impl NetworkBehaviour for Notifications { // more to do. debug_assert!(matches!( connec_state, - ConnectionState::OpenDesiredByRemote | - ConnectionState::Closing | ConnectionState::Opening + ConnectionState::OpenDesiredByRemote + | ConnectionState::Closing | ConnectionState::Opening )); } } else { @@ -1656,7 +1658,7 @@ impl NetworkBehaviour for Notifications { } else { error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler"); debug_assert!(false); - return + return; }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { @@ -1675,12 +1677,12 @@ impl NetworkBehaviour for Notifications { error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler"); debug_assert!(false); - return + return; }; if matches!(connections[pos].1, ConnectionState::Closing) { *entry.into_mut() = PeerState::Enabled { connections }; - return + return; } debug_assert!(matches!(connections[pos].1, ConnectionState::Open(_))); @@ -1732,8 +1734,8 @@ impl NetworkBehaviour for Notifications { // All connections in `Disabled` and `DisabledPendingEnable` have been sent a // `Close` message already, and as such ignore any `CloseDesired` message. - state @ PeerState::Disabled { .. } | - state @ PeerState::DisabledPendingEnable { .. } => { + state @ PeerState::Disabled { .. } + | state @ PeerState::DisabledPendingEnable { .. } => { *entry.into_mut() = state; }, state => { @@ -1753,10 +1755,10 @@ impl NetworkBehaviour for Notifications { match self.peers.get_mut(&(source, set_id)) { // Move the connection from `Closing` to `Closed`. - Some(PeerState::Incoming { connections, .. }) | - Some(PeerState::DisabledPendingEnable { connections, .. 
}) | - Some(PeerState::Disabled { connections, .. }) | - Some(PeerState::Enabled { connections, .. }) => { + Some(PeerState::Incoming { connections, .. }) + | Some(PeerState::DisabledPendingEnable { connections, .. }) + | Some(PeerState::Disabled { connections, .. }) + | Some(PeerState::Enabled { connections, .. }) => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| { *c == connection && matches!(s, ConnectionState::Closing) }) { @@ -1825,9 +1827,9 @@ impl NetworkBehaviour for Notifications { } }, - Some(PeerState::Incoming { connections, .. }) | - Some(PeerState::DisabledPendingEnable { connections, .. }) | - Some(PeerState::Disabled { connections, .. }) => { + Some(PeerState::Incoming { connections, .. }) + | Some(PeerState::DisabledPendingEnable { connections, .. }) + | Some(PeerState::Disabled { connections, .. }) => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| { *c == connection && matches!(s, ConnectionState::OpeningThenClosing) }) { @@ -1859,7 +1861,7 @@ impl NetworkBehaviour for Notifications { } else { error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); debug_assert!(false); - return + return; }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { @@ -1899,17 +1901,17 @@ impl NetworkBehaviour for Notifications { *entry.into_mut() = PeerState::Enabled { connections }; } }, - mut state @ PeerState::Incoming { .. } | - mut state @ PeerState::DisabledPendingEnable { .. } | - mut state @ PeerState::Disabled { .. } => { + mut state @ PeerState::Incoming { .. } + | mut state @ PeerState::DisabledPendingEnable { .. } + | mut state @ PeerState::Disabled { .. } => { match &mut state { - PeerState::Incoming { connections, .. } | - PeerState::Disabled { connections, .. } | - PeerState::DisabledPendingEnable { connections, .. } => { + PeerState::Incoming { connections, .. } + | PeerState::Disabled { connections, .. } + | PeerState::DisabledPendingEnable { connections, .. 
} => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| { - *c == connection && - matches!(s, ConnectionState::OpeningThenClosing) + *c == connection + && matches!(s, ConnectionState::OpeningThenClosing) }) { *connec_state = ConnectionState::Closing; } else { @@ -1975,7 +1977,7 @@ impl NetworkBehaviour for Notifications { _params: &mut impl PollParameters, ) -> Poll> { if let Some(event) = self.events.pop_front() { - return Poll::Ready(event) + return Poll::Ready(event); } // Poll for instructions from the peerset. @@ -1996,7 +1998,7 @@ impl NetworkBehaviour for Notifications { }, Poll::Ready(None) => { error!(target: "sub-libp2p", "Peerset receiver stream has returned None"); - break + break; }, Poll::Pending => break, } @@ -2064,7 +2066,7 @@ impl NetworkBehaviour for Notifications { } if let Some(event) = self.events.pop_front() { - return Poll::Ready(event) + return Poll::Ready(event); } Poll::Pending diff --git a/client/network/src/protocol/notifications/handler.rs b/client/network/src/protocol/notifications/handler.rs index ea09cb76edce1..4ae13a259be71 100644 --- a/client/network/src/protocol/notifications/handler.rs +++ b/client/network/src/protocol/notifications/handler.rs @@ -521,13 +521,13 @@ impl ConnectionHandler for NotifsHandler { // in mind that it is invalid for the remote to open multiple such // substreams, and therefore sending a "RST" is the most correct thing // to do. - return + return; }, - State::Opening { ref mut in_substream, .. } | - State::Open { ref mut in_substream, .. } => { + State::Opening { ref mut in_substream, .. } + | State::Open { ref mut in_substream, .. } => { if in_substream.is_some() { // Same remark as above. 
- return + return; } // Create `handshake_message` on a separate line to be sure that the @@ -545,8 +545,8 @@ impl ConnectionHandler for NotifsHandler { protocol_index: Self::OutboundOpenInfo, ) { match self.protocols[protocol_index].state { - State::Closed { ref mut pending_opening } | - State::OpenDesiredByRemote { ref mut pending_opening, .. } => { + State::Closed { ref mut pending_opening } + | State::OpenDesiredByRemote { ref mut pending_opening, .. } => { debug_assert!(*pending_opening); *pending_opening = false; }, @@ -682,8 +682,8 @@ impl ConnectionHandler for NotifsHandler { _: ConnectionHandlerUpgrErr, ) { match self.protocols[num].state { - State::Closed { ref mut pending_opening } | - State::OpenDesiredByRemote { ref mut pending_opening, .. } => { + State::Closed { ref mut pending_opening } + | State::OpenDesiredByRemote { ref mut pending_opening, .. } => { debug_assert!(*pending_opening); *pending_opening = false; }, @@ -704,7 +704,7 @@ impl ConnectionHandler for NotifsHandler { fn connection_keep_alive(&self) -> KeepAlive { // `Yes` if any protocol has some activity. if self.protocols.iter().any(|p| !matches!(p.state, State::Closed { .. })) { - return KeepAlive::Yes + return KeepAlive::Yes; } // A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote @@ -724,7 +724,7 @@ impl ConnectionHandler for NotifsHandler { >, > { if let Some(ev) = self.events_queue.pop_front() { - return Poll::Ready(ev) + return Poll::Ready(ev); } // For each open substream, try send messages from `notifications_sink_rx` to the @@ -739,10 +739,11 @@ impl ConnectionHandler for NotifsHandler { // available in `notifications_sink_rx`. This avoids waking up the task when // a substream is ready to send if there isn't actually something to send. 
match Pin::new(&mut *notifications_sink_rx).as_mut().poll_peek(cx) { - Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => + Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => { return Poll::Ready(ConnectionHandlerEvent::Close( NotifsHandlerError::SyncNotificationsClogged, - )), + )) + }, Poll::Ready(Some(&NotificationsSinkMessage::Notification { .. })) => {}, Poll::Ready(None) | Poll::Pending => break, } @@ -756,14 +757,15 @@ impl ConnectionHandler for NotifsHandler { // Now that the substream is ready for a message, grab what to send. let message = match notifications_sink_rx.poll_next_unpin(cx) { - Poll::Ready(Some(NotificationsSinkMessage::Notification { message })) => - message, - Poll::Ready(Some(NotificationsSinkMessage::ForceClose)) | - Poll::Ready(None) | - Poll::Pending => { + Poll::Ready(Some(NotificationsSinkMessage::Notification { message })) => { + message + }, + Poll::Ready(Some(NotificationsSinkMessage::ForceClose)) + | Poll::Ready(None) + | Poll::Pending => { // Should never be reached, as per `poll_peek` above. debug_assert!(false); - break + break; }, }; @@ -788,15 +790,15 @@ impl ConnectionHandler for NotifsHandler { Poll::Ready(Err(_)) => { *out_substream = None; let event = NotifsHandlerOut::CloseDesired { protocol_index }; - return Poll::Ready(ConnectionHandlerEvent::Custom(event)) + return Poll::Ready(ConnectionHandlerEvent::Custom(event)); }, }; }, - State::Closed { .. } | - State::Opening { .. } | - State::Open { out_substream: None, .. } | - State::OpenDesiredByRemote { .. } => {}, + State::Closed { .. } + | State::Opening { .. } + | State::Open { out_substream: None, .. } + | State::OpenDesiredByRemote { .. } => {}, } } @@ -805,21 +807,22 @@ impl ConnectionHandler for NotifsHandler { // Inbound substreams being closed is always tolerated, except for the // `OpenDesiredByRemote` state which might need to be switched back to `Closed`. match &mut self.protocols[protocol_index].state { - State::Closed { .. 
} | - State::Open { in_substream: None, .. } | - State::Opening { in_substream: None } => {}, + State::Closed { .. } + | State::Open { in_substream: None, .. } + | State::Opening { in_substream: None } => {}, - State::Open { in_substream: in_substream @ Some(_), .. } => + State::Open { in_substream: in_substream @ Some(_), .. } => { match Stream::poll_next(Pin::new(in_substream.as_mut().unwrap()), cx) { Poll::Pending => {}, Poll::Ready(Some(Ok(message))) => { let event = NotifsHandlerOut::Notification { protocol_index, message }; - return Poll::Ready(ConnectionHandlerEvent::Custom(event)) + return Poll::Ready(ConnectionHandlerEvent::Custom(event)); }, Poll::Ready(None) | Poll::Ready(Some(Err(_))) => *in_substream = None, - }, + } + }, - State::OpenDesiredByRemote { in_substream, pending_opening } => + State::OpenDesiredByRemote { in_substream, pending_opening } => { match NotificationsInSubstream::poll_process(Pin::new(in_substream), cx) { Poll::Pending => {}, Poll::Ready(Ok(void)) => match void {}, @@ -828,11 +831,12 @@ impl ConnectionHandler for NotifsHandler { State::Closed { pending_opening: *pending_opening }; return Poll::Ready(ConnectionHandlerEvent::Custom( NotifsHandlerOut::CloseDesired { protocol_index }, - )) + )); }, - }, + } + }, - State::Opening { in_substream: in_substream @ Some(_), .. } => + State::Opening { in_substream: in_substream @ Some(_), .. 
} => { match NotificationsInSubstream::poll_process( Pin::new(in_substream.as_mut().unwrap()), cx, @@ -840,7 +844,8 @@ impl ConnectionHandler for NotifsHandler { Poll::Pending => {}, Poll::Ready(Ok(void)) => match void {}, Poll::Ready(Err(_)) => *in_substream = None, - }, + } + }, } } diff --git a/client/network/src/protocol/notifications/tests.rs b/client/network/src/protocol/notifications/tests.rs index fa79366d20283..c80b2571f8fd8 100644 --- a/client/network/src/protocol/notifications/tests.rs +++ b/client/network/src/protocol/notifications/tests.rs @@ -285,9 +285,9 @@ fn reconnect_after_disconnect() { NotificationsOut::CustomProtocolClosed { .. }, )) => match service1_state { ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain | - ServiceState::NotConnected | - ServiceState::Disconnected => panic!(), + ServiceState::ConnectedAgain + | ServiceState::NotConnected + | ServiceState::Disconnected => panic!(), }, future::Either::Right(SwarmEvent::Behaviour( NotificationsOut::CustomProtocolOpen { .. }, @@ -308,17 +308,17 @@ fn reconnect_after_disconnect() { NotificationsOut::CustomProtocolClosed { .. }, )) => match service2_state { ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain | - ServiceState::NotConnected | - ServiceState::Disconnected => panic!(), + ServiceState::ConnectedAgain + | ServiceState::NotConnected + | ServiceState::Disconnected => panic!(), }, _ => {}, } - if service1_state == ServiceState::ConnectedAgain && - service2_state == ServiceState::ConnectedAgain + if service1_state == ServiceState::ConnectedAgain + && service2_state == ServiceState::ConnectedAgain { - break + break; } } @@ -340,8 +340,8 @@ fn reconnect_after_disconnect() { }; match event { - SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. }) | - SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. 
}) => panic!(), + SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. }) + | SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. }) => panic!(), _ => {}, } } diff --git a/client/network/src/protocol/notifications/upgrade/notifications.rs b/client/network/src/protocol/notifications/upgrade/notifications.rs index 56cfefd75d53d..950ddfd8fff50 100644 --- a/client/network/src/protocol/notifications/upgrade/notifications.rs +++ b/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -149,7 +149,7 @@ where return Err(NotificationsHandshakeError::TooLarge { requested: handshake_len, max: MAX_HANDSHAKE_SIZE, - }) + }); } let mut handshake = vec![0u8; handshake_len]; @@ -197,7 +197,7 @@ where pub fn send_handshake(&mut self, message: impl Into>) { if !matches!(self.handshake, NotificationsInSubstreamHandshake::NotSent) { error!(target: "sub-libp2p", "Tried to send handshake twice"); - return + return; } self.handshake = NotificationsInSubstreamHandshake::PendingSend(message.into()); @@ -224,27 +224,28 @@ where }, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); - return Poll::Pending + return Poll::Pending; }, } }, NotificationsInSubstreamHandshake::Flush => { match Sink::poll_flush(this.socket.as_mut(), cx)? 
{ - Poll::Ready(()) => - *this.handshake = NotificationsInSubstreamHandshake::Sent, + Poll::Ready(()) => { + *this.handshake = NotificationsInSubstreamHandshake::Sent + }, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::Flush; - return Poll::Pending + return Poll::Pending; }, } }, - st @ NotificationsInSubstreamHandshake::NotSent | - st @ NotificationsInSubstreamHandshake::Sent | - st @ NotificationsInSubstreamHandshake::ClosingInResponseToRemote | - st @ NotificationsInSubstreamHandshake::BothSidesClosed => { + st @ NotificationsInSubstreamHandshake::NotSent + | st @ NotificationsInSubstreamHandshake::Sent + | st @ NotificationsInSubstreamHandshake::ClosingInResponseToRemote + | st @ NotificationsInSubstreamHandshake::BothSidesClosed => { *this.handshake = st; - return Poll::Pending + return Poll::Pending; }, } } @@ -265,7 +266,7 @@ where match mem::replace(this.handshake, NotificationsInSubstreamHandshake::Sent) { NotificationsInSubstreamHandshake::NotSent => { *this.handshake = NotificationsInSubstreamHandshake::NotSent; - return Poll::Pending + return Poll::Pending; }, NotificationsInSubstreamHandshake::PendingSend(msg) => { match Sink::poll_ready(this.socket.as_mut(), cx) { @@ -278,47 +279,51 @@ where }, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); - return Poll::Pending + return Poll::Pending; }, } }, NotificationsInSubstreamHandshake::Flush => { match Sink::poll_flush(this.socket.as_mut(), cx)? 
{ - Poll::Ready(()) => - *this.handshake = NotificationsInSubstreamHandshake::Sent, + Poll::Ready(()) => { + *this.handshake = NotificationsInSubstreamHandshake::Sent + }, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::Flush; - return Poll::Pending + return Poll::Pending; }, } }, NotificationsInSubstreamHandshake::Sent => { match Stream::poll_next(this.socket.as_mut(), cx) { - Poll::Ready(None) => + Poll::Ready(None) => { *this.handshake = - NotificationsInSubstreamHandshake::ClosingInResponseToRemote, + NotificationsInSubstreamHandshake::ClosingInResponseToRemote + }, Poll::Ready(Some(msg)) => { *this.handshake = NotificationsInSubstreamHandshake::Sent; - return Poll::Ready(Some(msg)) + return Poll::Ready(Some(msg)); }, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::Sent; - return Poll::Pending + return Poll::Pending; }, } }, - NotificationsInSubstreamHandshake::ClosingInResponseToRemote => + NotificationsInSubstreamHandshake::ClosingInResponseToRemote => { match Sink::poll_close(this.socket.as_mut(), cx)? 
{ - Poll::Ready(()) => - *this.handshake = NotificationsInSubstreamHandshake::BothSidesClosed, + Poll::Ready(()) => { + *this.handshake = NotificationsInSubstreamHandshake::BothSidesClosed + }, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::ClosingInResponseToRemote; - return Poll::Pending + return Poll::Pending; }, - }, + } + }, NotificationsInSubstreamHandshake::BothSidesClosed => return Poll::Ready(None), } @@ -373,7 +378,7 @@ where return Err(NotificationsHandshakeError::TooLarge { requested: handshake_len, max: MAX_HANDSHAKE_SIZE, - }) + }); } let mut handshake = vec![0u8; handshake_len]; diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index d49cbd8051341..52737bf121283 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -212,7 +212,9 @@ impl RequestResponsesBehaviour { match protocols.entry(protocol.name) { Entry::Vacant(e) => e.insert((rq_rp, protocol.inbound_queue)), - Entry::Occupied(e) => return Err(RegisterError::DuplicateProtocol(e.key().clone())), + Entry::Occupied(e) => { + return Err(RegisterError::DuplicateProtocol(e.key().clone())) + }, }; } @@ -366,7 +368,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { (p_name, event): ::OutEvent, ) { if let Some((proto, _)) = self.protocols.get_mut(&*p_name) { - return proto.inject_event(peer_id, connection, event) + return proto.inject_event(peer_id, connection, event); } log::warn!(target: "sub-libp2p", @@ -461,7 +463,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { resp_builder, get_peer_reputation, }); - return Poll::Pending + return Poll::Pending; }, Poll::Ready(reputation) => { // Once we get the reputation we can continue processing the request. 
@@ -477,7 +479,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { peer, reputation, ); - continue 'poll_all + continue 'poll_all; } let (tx, rx) = oneshot::channel(); @@ -516,7 +518,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // This `continue` makes sure that `pending_responses` gets polled // after we have added the new element. - continue 'poll_all + continue 'poll_all; }, } } @@ -557,7 +559,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { if !reputation_changes.is_empty() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent( Event::ReputationChanges { peer, changes: reputation_changes }, - )) + )); } } @@ -578,24 +580,27 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } let protocol = protocol.to_string(); let handler = self.new_handler_with_replacement(protocol, handler); - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) + return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }); }, - NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => + NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event: ((*protocol).to_string(), event), - }), - NetworkBehaviourAction::ReportObservedAddr { address, score } => + }) + }, + NetworkBehaviourAction::ReportObservedAddr { address, score } => { return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score, - }), - NetworkBehaviourAction::CloseConnection { peer_id, connection } => + }) + }, + NetworkBehaviourAction::CloseConnection { peer_id, connection } => { return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection, - }), + }) + }, }; match ev { @@ -626,7 +631,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // This `continue` makes sure that `message_request` gets polled // after we have added the new element. 
- continue 'poll_all + continue 'poll_all; }, // Received a response from a remote to one of our requests. @@ -652,7 +657,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { request_id, ); debug_assert!(false); - continue + continue; }, }; @@ -663,7 +668,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { result: delivered, }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); }, // One of our requests has failed. @@ -695,7 +700,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { request_id, ); debug_assert!(false); - continue + continue; }, }; @@ -706,7 +711,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { result: Err(RequestFailure::Network(error)), }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); }, // An inbound request failed, either while reading the request or due to @@ -722,7 +727,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { protocol: protocol.clone(), result: Err(ResponseFailure::Network(error)), }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); }, // A response to an inbound request has been sent. @@ -751,13 +756,13 @@ impl NetworkBehaviour for RequestResponsesBehaviour { result: Ok(arrival_time), }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); }, }; } } - break Poll::Pending + break Poll::Pending; } } } @@ -809,7 +814,7 @@ impl RequestResponseCodec for GenericCodec { return Err(io::Error::new( io::ErrorKind::InvalidInput, format!("Request size exceeds limit: {} > {}", length, self.max_request_size), - )) + )); } // Read the payload. 
@@ -836,7 +841,9 @@ impl RequestResponseCodec for GenericCodec { Ok(l) => l, Err(unsigned_varint::io::ReadError::Io(err)) if matches!(err.kind(), io::ErrorKind::UnexpectedEof) => - return Ok(Err(())), + { + return Ok(Err(())) + }, Err(err) => return Err(io::Error::new(io::ErrorKind::InvalidInput, err)), }; @@ -844,7 +851,7 @@ impl RequestResponseCodec for GenericCodec { return Err(io::Error::new( io::ErrorKind::InvalidInput, format!("Response size exceeds limit: {} > {}", length, self.max_response_size), - )) + )); } // Read the payload. @@ -1057,7 +1064,7 @@ mod tests { }, SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => { result.unwrap(); - break + break; }, _ => {}, } @@ -1126,7 +1133,7 @@ mod tests { match swarm.select_next_some().await { SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { assert!(result.is_ok()); - break + break; }, _ => {}, } @@ -1160,7 +1167,7 @@ mod tests { }, SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => { assert!(result.is_err()); - break + break; }, _ => {}, } @@ -1329,7 +1336,7 @@ mod tests { num_responses += 1; result.unwrap(); if num_responses == 2 { - break + break; } }, _ => {}, diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 5ffd36007f530..8d3146114815d 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -376,8 +376,9 @@ where match result { Ok(b) => b, - Err(crate::request_responses::RegisterError::DuplicateProtocol(proto)) => - return Err(Error::DuplicateRequestResponseProtocol { protocol: proto }), + Err(crate::request_responses::RegisterError::DuplicateProtocol(proto)) => { + return Err(Error::DuplicateRequestResponseProtocol { protocol: proto }) + }, } }; @@ -602,7 +603,7 @@ where } else { error!(target: "sub-libp2p", "Found state inconsistency between custom protocol \ and debug information about {:?}", peer_id); - return None + return None; }; Some(( @@ -896,7 +897,7 @@ where fn add_reserved_peer(&self, peer: 
MultiaddrWithPeerId) -> Result<(), String> { // Make sure the local peer ID is never added to the PSM. if peer.peer_id == self.local_peer_id { - return Err("Local peer ID cannot be added as a reserved peer.".to_string()) + return Err("Local peer ID cannot be added as a reserved peer.".to_string()); } let _ = self @@ -922,7 +923,7 @@ where for (peer_id, addr) in peers_addrs.into_iter() { // Make sure the local peer ID is never added to the PSM. if peer_id == self.local_peer_id { - return Err("Local peer ID cannot be added as a reserved peer.".to_string()) + return Err("Local peer ID cannot be added as a reserved peer.".to_string()); } peers.insert(peer_id); @@ -951,7 +952,7 @@ where for (peer_id, addr) in peers.into_iter() { // Make sure the local peer ID is never added to the PSM. if peer_id == self.local_peer_id { - return Err("Local peer ID cannot be added as a reserved peer.".to_string()) + return Err("Local peer ID cannot be added as a reserved peer.".to_string()); } if !addr.is_empty() { @@ -985,7 +986,7 @@ where for (peer_id, addr) in peers.into_iter() { // Make sure the local peer ID is never added to the PSM. if peer_id == self.local_peer_id { - return Err("Local peer ID cannot be added as a reserved peer.".to_string()) + return Err("Local peer ID cannot be added as a reserved peer.".to_string()); } if !addr.is_empty() { @@ -1045,7 +1046,7 @@ where "Attempted to send notification on missing or closed substream: {}, {:?}", target, protocol, ); - return + return; } }; @@ -1077,7 +1078,7 @@ where if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { sink.clone() } else { - return Err(NotificationSenderError::Closed) + return Err(NotificationSenderError::Closed); } }; @@ -1329,7 +1330,7 @@ where num_iterations += 1; if num_iterations >= 100 { cx.waker().wake_by_ref(); - break + break; } // Process the next message coming from the `NetworkService`. 
@@ -1354,10 +1355,12 @@ where .behaviour_mut() .user_protocol_mut() .clear_justification_requests(), - ServiceToWorkerMsg::GetValue(key) => - this.network_service.behaviour_mut().get_value(key), - ServiceToWorkerMsg::PutValue(key, value) => - this.network_service.behaviour_mut().put_value(key, value), + ServiceToWorkerMsg::GetValue(key) => { + this.network_service.behaviour_mut().get_value(key) + }, + ServiceToWorkerMsg::PutValue(key, value) => { + this.network_service.behaviour_mut().put_value(key, value) + }, ServiceToWorkerMsg::SetReservedOnly(reserved_only) => this .network_service .behaviour_mut() @@ -1393,8 +1396,9 @@ where .behaviour_mut() .user_protocol_mut() .remove_set_reserved_peer(protocol, peer_id), - ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => - this.network_service.behaviour_mut().add_known_address(peer_id, addr), + ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => { + this.network_service.behaviour_mut().add_known_address(peer_id, addr) + }, ServiceToWorkerMsg::AddToPeersSet(protocol, peer_id) => this .network_service .behaviour_mut() @@ -1447,7 +1451,7 @@ where num_iterations += 1; if num_iterations >= 1000 { cx.waker().wake_by_ref(); - break + break; } // Process the next action coming from the network. @@ -1575,11 +1579,15 @@ where // inbound request whenever a request with an unsupported // protocol is received. This is not reported in order to // avoid confusions. - continue, - ResponseFailure::Network(InboundFailure::ResponseOmission) => - "busy-omitted", - ResponseFailure::Network(InboundFailure::ConnectionClosed) => - "connection-closed", + { + continue + }, + ResponseFailure::Network(InboundFailure::ResponseOmission) => { + "busy-omitted" + }, + ResponseFailure::Network(InboundFailure::ConnectionClosed) => { + "connection-closed" + }, }; metrics @@ -1595,7 +1603,7 @@ where duration, result, .. 
- })) => + })) => { if let Some(metrics) = this.metrics.as_ref() { match result { Ok(_) => { @@ -1610,11 +1618,13 @@ where RequestFailure::UnknownProtocol => "unknown-protocol", RequestFailure::Refused => "refused", RequestFailure::Obsolete => "obsolete", - RequestFailure::Network(OutboundFailure::DialFailure) => - "dial-failure", + RequestFailure::Network(OutboundFailure::DialFailure) => { + "dial-failure" + }, RequestFailure::Network(OutboundFailure::Timeout) => "timeout", - RequestFailure::Network(OutboundFailure::ConnectionClosed) => - "connection-closed", + RequestFailure::Network(OutboundFailure::ConnectionClosed) => { + "connection-closed" + }, RequestFailure::Network( OutboundFailure::UnsupportedProtocols, ) => "unsupported", @@ -1626,14 +1636,16 @@ where .inc(); }, } - }, + } + }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::ReputationChanges { peer, changes, - })) => + })) => { for change in changes { this.network_service.behaviour().user_protocol().report_peer(peer, change); - }, + } + }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::PeerIdentify { peer_id, info: @@ -1669,10 +1681,11 @@ where .user_protocol_mut() .add_default_set_discovered_nodes(iter::once(peer_id)); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted)) => + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted)) => { if let Some(metrics) = this.metrics.as_ref() { metrics.kademlia_random_queries_total.inc(); - }, + } + }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { remote, protocol, @@ -1894,14 +1907,15 @@ where let reason = match error { DialError::ConnectionLimit(_) => Some("limit-reached"), DialError::InvalidPeerId(_) => Some("invalid-peer-id"), - DialError::Transport(_) | DialError::ConnectionIo(_) => - Some("transport-error"), - DialError::Banned | - DialError::LocalPeerId | - DialError::NoAddresses | - DialError::DialPeerConditionFalse(_) | - DialError::WrongPeerId { .. 
} | - DialError::Aborted => None, // ignore them + DialError::Transport(_) | DialError::ConnectionIo(_) => { + Some("transport-error") + }, + DialError::Banned + | DialError::LocalPeerId + | DialError::NoAddresses + | DialError::DialPeerConditionFalse(_) + | DialError::WrongPeerId { .. } + | DialError::Aborted => None, // ignore them }; if let Some(reason) = reason { metrics @@ -1935,8 +1949,8 @@ where let reason = match error { PendingConnectionError::ConnectionLimit(_) => Some("limit-reached"), PendingConnectionError::WrongPeerId { .. } => Some("invalid-peer-id"), - PendingConnectionError::Transport(_) | - PendingConnectionError::IO(_) => Some("transport-error"), + PendingConnectionError::Transport(_) + | PendingConnectionError::IO(_) => Some("transport-error"), PendingConnectionError::Aborted => None, // ignore it }; @@ -2113,7 +2127,7 @@ fn ensure_addresses_consistent_with_transport<'a>( return Err(Error::AddressesForAnotherTransport { transport: transport.clone(), addresses, - }) + }); } } else { let addresses: Vec<_> = addresses @@ -2125,7 +2139,7 @@ fn ensure_addresses_consistent_with_transport<'a>( return Err(Error::AddressesForAnotherTransport { transport: transport.clone(), addresses, - }) + }); } } diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index 4144d7f19551e..7a8012cfd04cf 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -261,7 +261,7 @@ impl Metrics { .inc_by(num); }); }, - Event::NotificationsReceived { messages, .. } => + Event::NotificationsReceived { messages, .. 
} => { for (protocol, message) in messages { format_label("notif-", protocol, |protocol_label| { self.events_total @@ -271,7 +271,8 @@ impl Metrics { self.notifications_sizes.with_label_values(&[protocol, "sent", name]).inc_by( num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::MAX)), ); - }, + } + }, } } @@ -298,7 +299,7 @@ impl Metrics { self.events_total.with_label_values(&[protocol_label, "received", name]).inc(); }); }, - Event::NotificationsReceived { messages, .. } => + Event::NotificationsReceived { messages, .. } => { for (protocol, message) in messages { format_label("notif-", protocol, |protocol_label| { self.events_total @@ -308,7 +309,8 @@ impl Metrics { self.notifications_sizes .with_label_values(&[protocol, "received", name]) .inc_by(u64::try_from(message.len()).unwrap_or(u64::MAX)); - }, + } + }, } } } diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index ef25616a07b0d..d992ee6df3dc2 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -223,11 +223,12 @@ impl TestNetworkBuilder { let (chain_sync, chain_sync_service) = ChainSync::new( match network_config.sync_mode { config::SyncMode::Full => sc_network_common::sync::SyncMode::Full, - config::SyncMode::Fast { skip_proofs, storage_chain_mode } => + config::SyncMode::Fast { skip_proofs, storage_chain_mode } => { sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode, - }, + } + }, config::SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, }, client.clone(), diff --git a/client/network/src/service/tests/service.rs b/client/network/src/service/tests/service.rs index 90945fdcef2cf..4005bc3a78429 100644 --- a/client/network/src/service/tests/service.rs +++ b/client/network/src/service/tests/service.rs @@ -101,7 +101,7 @@ fn notifications_state_consistent() { iterations += 1; if iterations >= 1_000 { assert!(something_happened); - break + break; } // Start by sending a 
notification from node1 to node2 and vice-versa. Part of the @@ -137,10 +137,12 @@ fn notifications_state_consistent() { // forever while nothing at all happens on the network. let continue_test = futures_timer::Delay::new(Duration::from_millis(20)); match future::select(future::select(next1, next2), continue_test).await { - future::Either::Left((future::Either::Left((Some(ev), _)), _)) => - future::Either::Left(ev), - future::Either::Left((future::Either::Right((Some(ev), _)), _)) => - future::Either::Right(ev), + future::Either::Left((future::Either::Left((Some(ev), _)), _)) => { + future::Either::Left(ev) + }, + future::Either::Left((future::Either::Right((Some(ev), _)), _)) => { + future::Either::Right(ev) + }, future::Either::Right(_) => continue, _ => break, } @@ -149,38 +151,42 @@ fn notifications_state_consistent() { match next_event { future::Either::Left(Event::NotificationStreamOpened { remote, protocol, .. - }) => + }) => { if protocol == PROTOCOL_NAME.into() { something_happened = true; assert!(!node1_to_node2_open); node1_to_node2_open = true; assert_eq!(remote, node2.local_peer_id()); - }, + } + }, future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. - }) => + }) => { if protocol == PROTOCOL_NAME.into() { something_happened = true; assert!(!node2_to_node1_open); node2_to_node1_open = true; assert_eq!(remote, node1.local_peer_id()); - }, + } + }, future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. - }) => + }) => { if protocol == PROTOCOL_NAME.into() { assert!(node1_to_node2_open); node1_to_node2_open = false; assert_eq!(remote, node2.local_peer_id()); - }, + } + }, future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. - }) => + }) => { if protocol == PROTOCOL_NAME.into() { assert!(node2_to_node1_open); node2_to_node1_open = false; assert_eq!(remote, node1.local_peer_id()); - }, + } + }, future::Either::Left(Event::NotificationsReceived { remote, .. 
}) => { assert!(node1_to_node2_open); assert_eq!(remote, node2.local_peer_id()); @@ -296,12 +302,13 @@ fn notifications_back_pressure() { while received_notifications < TOTAL_NOTIFS { match events_stream2.next().await.unwrap() { Event::NotificationStreamClosed { .. } => panic!(), - Event::NotificationsReceived { messages, .. } => + Event::NotificationsReceived { messages, .. } => { for message in messages { assert_eq!(message.0, PROTOCOL_NAME.into()); assert_eq!(message.1, format!("hello #{}", received_notifications)); received_notifications += 1; - }, + } + }, _ => {}, }; @@ -376,7 +383,7 @@ fn fallback_name_working() { Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } => { assert_eq!(protocol, PROTOCOL_NAME.into()); assert_eq!(negotiated_fallback, None); - break + break; }, _ => {}, }; @@ -391,7 +398,7 @@ fn fallback_name_working() { if protocol == NEW_PROTOCOL_NAME.into() => { assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME.into())); - break + break; }, _ => {}, }; diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index b5f8b6b73bce9..2e3d0218ce153 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -365,11 +365,12 @@ where let body = if get_body { match self.client.block_body(hash)? 
{ - Some(mut extrinsics) => - extrinsics.iter_mut().map(|extrinsic| extrinsic.encode()).collect(), + Some(mut extrinsics) => { + extrinsics.iter_mut().map(|extrinsic| extrinsic.encode()).collect() + }, None => { log::trace!(target: LOG_TARGET, "Missing data for block request."); - break + break; }, } } else { @@ -406,13 +407,13 @@ where indexed_body, }; - let new_total_size = total_size + - block_data.body.iter().map(|ex| ex.len()).sum::() + - block_data.indexed_body.iter().map(|ex| ex.len()).sum::(); + let new_total_size = total_size + + block_data.body.iter().map(|ex| ex.len()).sum::() + + block_data.indexed_body.iter().map(|ex| ex.len()).sum::(); // Send at least one block, but make sure to not exceed the limit. if !blocks.is_empty() && new_total_size > MAX_BODY_BYTES { - break + break; } total_size = new_total_size; @@ -420,14 +421,14 @@ where blocks.push(block_data); if blocks.len() >= max_blocks as usize { - break + break; } match direction { Direction::Ascending => block_id = BlockId::Number(number + One::one()), Direction::Descending => { if number.is_zero() { - break + break; } block_id = BlockId::Hash(parent_hash) }, diff --git a/client/network/sync/src/blocks.rs b/client/network/sync/src/blocks.rs index b8acd61a2009f..945e746bf4604 100644 --- a/client/network/sync/src/blocks.rs +++ b/client/network/sync/src/blocks.rs @@ -82,7 +82,7 @@ impl BlockCollection { /// Insert a set of blocks into collection. 
pub fn insert(&mut self, start: NumberFor, blocks: Vec>, who: PeerId) { if blocks.is_empty() { - return + return; } match self.blocks.get(&start) { @@ -91,7 +91,7 @@ impl BlockCollection { }, Some(&BlockRangeState::Complete(ref existing)) if existing.len() >= blocks.len() => { trace!(target: "sync", "Ignored block data already downloaded: {}", start); - return + return; }, _ => (), } @@ -117,7 +117,7 @@ impl BlockCollection { ) -> Option>> { if peer_best <= common { // Bail out early - return None + return None; } // First block number that we need to download let first_different = common + >::one(); @@ -130,24 +130,28 @@ impl BlockCollection { break match (prev, next) { (Some((start, &BlockRangeState::Downloading { ref len, downloading })), _) if downloading < max_parallel => - (*start..*start + *len, downloading), - (Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start => - (*start + r.len()..cmp::min(*next_start, *start + r.len() + count), 0), // gap + { + (*start..*start + *len, downloading) + }, + (Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start => { + (*start + r.len()..cmp::min(*next_start, *start + r.len() + count), 0) + }, // gap (Some((start, r)), None) => (*start + r.len()..*start + r.len() + count, 0), /* last range */ (None, None) => (first_different..first_different + count, 0), /* empty */ - (None, Some((start, _))) if *start > first_different => - (first_different..cmp::min(first_different + count, *start), 0), /* gap at the start */ + (None, Some((start, _))) if *start > first_different => { + (first_different..cmp::min(first_different + count, *start), 0) + }, /* gap at the start */ _ => { prev = next; - continue + continue; }, - } + }; } }; // crop to peers best if range.start > peer_best { trace!(target: "sync", "Out of range for peer {} ({} vs {})", who, range.start, peer_best); - return None + return None; } range.end = cmp::min(peer_best + One::one(), range.end); @@ -158,7 +162,7 @@ impl 
BlockCollection { .map_or(false, |(n, _)| range.start > *n + max_ahead.into()) { trace!(target: "sync", "Too far ahead for peer {} ({})", who, range.start); - return None + return None; } self.peer_requests.insert(who, range.start); @@ -189,7 +193,7 @@ impl BlockCollection { let mut prev = from; for (&start, range_data) in &mut self.blocks { if start > prev { - break + break; } let len = match range_data { BlockRangeState::Complete(blocks) => { diff --git a/client/network/sync/src/extra_requests.rs b/client/network/sync/src/extra_requests.rs index 0506bd542ff3b..2911768a198eb 100644 --- a/client/network/sync/src/extra_requests.rs +++ b/client/network/sync/src/extra_requests.rs @@ -130,7 +130,7 @@ impl ExtraRequests { ); self.importing_requests.insert(request); - return Some((who, request.0, request.1, r)) + return Some((who, request.0, request.1, r)); } else { trace!(target: "sync", "Empty {} response from {:?} for {:?}", @@ -161,7 +161,7 @@ impl ExtraRequests { let request = (*best_finalized_hash, best_finalized_number); if self.try_finalize_root::<()>(request, Ok(request), false) { - return Ok(()) + return Ok(()); } if best_finalized_number > self.best_seen_finalized_number { @@ -201,7 +201,7 @@ impl ExtraRequests { reschedule_on_failure: bool, ) -> bool { if !self.importing_requests.remove(&request) { - return false + return false; } let (finalized_hash, finalized_number) = match result { @@ -210,7 +210,7 @@ impl ExtraRequests { if reschedule_on_failure { self.pending_requests.push_front(request); } - return true + return true; }, }; @@ -219,7 +219,7 @@ impl ExtraRequests { "‼️ Imported {:?} {:?} which isn't a root in the tree: {:?}", finalized_hash, finalized_number, self.tree.roots().collect::>() ); - return true + return true; } self.failed_requests.clear(); @@ -287,7 +287,7 @@ impl<'a, B: BlockT> Matcher<'a, B> { peers: &HashMap>, ) -> Option<(PeerId, ExtraRequest)> { if self.remaining == 0 { - return None + return None; } // clean up previously failed 
requests so we can retry again @@ -302,11 +302,11 @@ impl<'a, B: BlockT> Matcher<'a, B> { // only ask peers that have synced at least up to the block number that we're asking // the extra for if sync.best_number < request.1 { - continue + continue; } // don't request to any peers that already have pending requests if self.extras.active_requests.contains_key(peer) { - continue + continue; } // only ask if the same request has not failed for this peer before if self @@ -316,7 +316,7 @@ impl<'a, B: BlockT> Matcher<'a, B> { .map(|rr| rr.iter().any(|i| &i.0 == peer)) .unwrap_or(false) { - continue + continue; } self.extras.active_requests.insert(*peer, request); @@ -325,14 +325,14 @@ impl<'a, B: BlockT> Matcher<'a, B> { self.extras.request_type_name, peer, request, ); - return Some((*peer, request)) + return Some((*peer, request)); } self.extras.pending_requests.push_back(request); self.remaining -= 1; if self.remaining == 0 { - break + break; } } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 75ecb9322ca78..036fcbf5a12b9 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -439,8 +439,9 @@ where phase: WarpSyncPhase::DownloadingBlocks(gap_sync.best_queued_number), total_bytes: 0, }), - (None, SyncMode::Warp, _) => - Some(WarpSyncProgress { phase: WarpSyncPhase::AwaitingPeers, total_bytes: 0 }), + (None, SyncMode::Warp, _) => { + Some(WarpSyncProgress { phase: WarpSyncPhase::AwaitingPeers, total_bytes: 0 }) + }, (Some(sync), _, _) => Some(sync.progress()), _ => None, }; @@ -489,7 +490,7 @@ where Ok(BlockStatus::Unknown) => { if best_number.is_zero() { info!("💔 New peer with unknown genesis hash {} ({}).", best_hash, best_number); - return Err(BadPeer(who, rep::GENESIS_MISMATCH)) + return Err(BadPeer(who, rep::GENESIS_MISMATCH)); } // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have // enough to do in the import queue that it's not worth kicking off @@ -511,7 +512,7 @@ where 
state: PeerSyncState::Available, }, ); - return Ok(None) + return Ok(None); } // If we are at genesis, just start downloading. @@ -569,9 +570,9 @@ where Ok(req) }, - Ok(BlockStatus::Queued) | - Ok(BlockStatus::InChainWithState) | - Ok(BlockStatus::InChainPruned) => { + Ok(BlockStatus::Queued) + | Ok(BlockStatus::InChainWithState) + | Ok(BlockStatus::InChainPruned) => { debug!( target: "sync", "New peer with known best hash {} ({}).", @@ -636,14 +637,14 @@ where if self.is_known(hash) { debug!(target: "sync", "Refusing to sync known hash {:?}", hash); - return + return; } trace!(target: "sync", "Downloading requested old fork {:?}", hash); for peer_id in &peers { if let Some(peer) = self.peers.get_mut(peer_id) { if let PeerSyncState::AncestorSearch { .. } = peer.state { - continue + continue; } if number > peer.best_number { @@ -692,16 +693,16 @@ where &'a mut self, ) -> Box)> + 'a> { if self.mode == SyncMode::Warp { - return Box::new(std::iter::once(self.warp_target_block_request()).flatten()) + return Box::new(std::iter::once(self.warp_target_block_request()).flatten()); } if self.allowed_requests.is_empty() || self.state_sync.is_some() { - return Box::new(std::iter::empty()) + return Box::new(std::iter::empty()); } if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { trace!(target: "sync", "Too many blocks in the queue."); - return Box::new(std::iter::empty()) + return Box::new(std::iter::empty()); } let is_major_syncing = self.status().state.is_major_syncing(); let attrs = self.required_block_attributes(); @@ -717,7 +718,7 @@ where let gap_sync = &mut self.gap_sync; let iter = self.peers.iter_mut().filter_map(move |(&id, peer)| { if !peer.state.is_available() || !allowed_requests.contains(&id) { - return None + return None; } // If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away from the @@ -725,10 +726,10 @@ where // number is smaller than the last finalized block number, we should do an ancestor // search to find a better common block. 
If the queue is full we wait till all blocks // are imported though. - if best_queued.saturating_sub(peer.common_number) > MAX_BLOCKS_TO_LOOK_BACKWARDS.into() && - best_queued < peer.best_number && - peer.common_number < last_finalized && - queue.len() <= MAJOR_SYNC_BLOCKS.into() + if best_queued.saturating_sub(peer.common_number) > MAX_BLOCKS_TO_LOOK_BACKWARDS.into() + && best_queued < peer.best_number + && peer.common_number < last_finalized + && queue.len() <= MAJOR_SYNC_BLOCKS.into() { trace!( target: "sync", @@ -803,17 +804,17 @@ where fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { if self.allowed_requests.is_empty() { - return None + return None; } - if (self.state_sync.is_some() || self.warp_sync.is_some()) && - self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) + if (self.state_sync.is_some() || self.warp_sync.is_some()) + && self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) { // Only one pending state request is allowed. 
- return None + return None; } if let Some(sync) = &self.state_sync { if sync.is_complete() { - return None + return None; } for (id, peer) in self.peers.iter_mut() { @@ -822,13 +823,13 @@ where let request = sync.next_request(); trace!(target: "sync", "New StateRequest for {}: {:?}", id, request); self.allowed_requests.clear(); - return Some((*id, OpaqueStateRequest(Box::new(request)))) + return Some((*id, OpaqueStateRequest(Box::new(request)))); } } } if let Some(sync) = &self.warp_sync { if sync.is_complete() { - return None + return None; } if let (Some(request), Some(target)) = (sync.next_state_request(), sync.target_block_number()) @@ -838,7 +839,7 @@ where trace!(target: "sync", "New StateRequest for {}: {:?}", id, request); peer.state = PeerSyncState::DownloadingState; self.allowed_requests.clear(); - return Some((*id, OpaqueStateRequest(Box::new(request)))) + return Some((*id, OpaqueStateRequest(Box::new(request)))); } } } @@ -848,14 +849,15 @@ where fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { if let Some(sync) = &self.warp_sync { - if self.allowed_requests.is_empty() || - sync.is_complete() || - self.peers + if self.allowed_requests.is_empty() + || sync.is_complete() + || self + .peers .iter() .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpProof) { // Only one pending state request is allowed. 
- return None + return None; } if let Some(request) = sync.next_warp_proof_request() { let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect(); @@ -868,7 +870,7 @@ where trace!(target: "sync", "New WarpProofRequest for {}", id); peer.state = PeerSyncState::DownloadingWarpProof; self.allowed_requests.clear(); - return Some((*id, request)) + return Some((*id, request)); } } } @@ -943,14 +945,14 @@ where blocks } else { debug!(target: "sync", "Unexpected gap block response from {}", who); - return Err(BadPeer(*who, rep::NO_BLOCK)) + return Err(BadPeer(*who, rep::NO_BLOCK)); } }, PeerSyncState::DownloadingStale(_) => { peer.state = PeerSyncState::Available; if blocks.is_empty() { debug!(target: "sync", "Empty block response from {}", who); - return Err(BadPeer(*who, rep::NO_BLOCK)) + return Err(BadPeer(*who, rep::NO_BLOCK)); } validate_blocks::(&blocks, who, Some(request))?; blocks @@ -992,7 +994,7 @@ where "Invalid response when searching for ancestor from {}", who, ); - return Err(BadPeer(*who, rep::UNKNOWN_ANCESTOR)) + return Err(BadPeer(*who, rep::UNKNOWN_ANCESTOR)); }, (_, Err(e)) => { info!( @@ -1000,12 +1002,12 @@ where "❌ Error answering legitimate blockchain query: {}", e, ); - return Err(BadPeer(*who, rep::BLOCKCHAIN_READ_ERROR)) + return Err(BadPeer(*who, rep::BLOCKCHAIN_READ_ERROR)); }, }; if matching_hash.is_some() { - if *start < self.best_queued_number && - self.best_queued_number <= peer.best_number + if *start < self.best_queued_number + && self.best_queued_number <= peer.best_number { // We've made progress on this chain since the search was started. 
// Opportunistically set common number to updated number @@ -1017,7 +1019,7 @@ where } if matching_hash.is_none() && current.is_zero() { trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); - return Err(BadPeer(*who, rep::GENESIS_MISMATCH)) + return Err(BadPeer(*who, rep::GENESIS_MISMATCH)); } if let Some((next_state, next_num)) = handle_ancestor_search_state(state, *current, matching_hash.is_some()) @@ -1027,7 +1029,7 @@ where start: *start, state: next_state, }; - return Ok(OnBlockData::Request(*who, ancestry_request::(next_num))) + return Ok(OnBlockData::Request(*who, ancestry_request::(next_num))); } else { // Ancestry search is complete. Check if peer is on a stale fork unknown // to us and add it to sync targets if necessary. @@ -1041,8 +1043,8 @@ where matching_hash, peer.common_number, ); - if peer.common_number < peer.best_number && - peer.best_number < self.best_queued_number + if peer.common_number < peer.best_number + && peer.best_number < self.best_queued_number { trace!( target: "sync", @@ -1072,14 +1074,16 @@ where match warp_sync.import_target_block( blocks.pop().expect("`blocks` len checked above."), ) { - TargetBlockImportResult::Success => - return Ok(OnBlockData::Continue), - TargetBlockImportResult::BadResponse => - return Err(BadPeer(*who, rep::VERIFICATION_FAIL)), + TargetBlockImportResult::Success => { + return Ok(OnBlockData::Continue) + }, + TargetBlockImportResult::BadResponse => { + return Err(BadPeer(*who, rep::VERIFICATION_FAIL)) + }, } } else if blocks.is_empty() { debug!(target: "sync", "Empty block response from {}", who); - return Err(BadPeer(*who, rep::NO_BLOCK)) + return Err(BadPeer(*who, rep::NO_BLOCK)); } else { debug!( target: "sync", @@ -1087,7 +1091,7 @@ where blocks.len(), who, ); - return Err(BadPeer(*who, rep::NOT_REQUESTED)) + return Err(BadPeer(*who, rep::NOT_REQUESTED)); } } else { debug!( @@ -1095,13 +1099,13 @@ where "Logic error: we think we are downloading warp target block from {}, but no 
warp sync is happening.", who, ); - return Ok(OnBlockData::Continue) + return Ok(OnBlockData::Continue); } }, - PeerSyncState::Available | - PeerSyncState::DownloadingJustification(..) | - PeerSyncState::DownloadingState | - PeerSyncState::DownloadingWarpProof => Vec::new(), + PeerSyncState::Available + | PeerSyncState::DownloadingJustification(..) + | PeerSyncState::DownloadingState + | PeerSyncState::DownloadingWarpProof => Vec::new(), } } else { // When request.is_none() this is a block announcement. Just accept blocks. @@ -1129,7 +1133,7 @@ where } } else { // We don't know of this peer, so we also did not request anything from it. - return Err(BadPeer(*who, rep::NOT_REQUESTED)) + return Err(BadPeer(*who, rep::NOT_REQUESTED)); }; Ok(self.validate_and_queue_blocks(new_blocks, gap)) @@ -1175,7 +1179,7 @@ where sync.import_state(*response) } else { debug!(target: "sync", "Ignored obsolete state response from {}", who); - return Err(BadPeer(*who, rep::NOT_REQUESTED)) + return Err(BadPeer(*who, rep::NOT_REQUESTED)); }; match import_result { @@ -1221,7 +1225,7 @@ where sync.import_warp_proof(response) } else { debug!(target: "sync", "Ignored obsolete warp sync response from {}", who); - return Err(BadPeer(*who, rep::NOT_REQUESTED)) + return Err(BadPeer(*who, rep::NOT_REQUESTED)); }; match import_result { @@ -1242,7 +1246,7 @@ where peer } else { error!(target: "sync", "💔 Called on_block_justification with a bad peer ID"); - return Ok(OnBlockJustification::Nothing) + return Ok(OnBlockJustification::Nothing); }; self.allowed_requests.add(&who); @@ -1259,7 +1263,7 @@ where hash, block.hash, ); - return Err(BadPeer(who, rep::BAD_JUSTIFICATION)) + return Err(BadPeer(who, rep::BAD_JUSTIFICATION)); } block @@ -1281,7 +1285,7 @@ where if let Some((peer, hash, number, j)) = self.extra_justifications.on_response(who, justification) { - return Ok(OnBlockJustification::Import { peer, hash, number, justifications: j }) + return Ok(OnBlockJustification::Import { peer, hash, 
number, justifications: j }); } } @@ -1308,7 +1312,7 @@ where } for (result, hash) in results { if has_error { - break + break; } if result.is_err() { @@ -1316,10 +1320,11 @@ where } match result { - Ok(BlockImportStatus::ImportedKnown(number, who)) => + Ok(BlockImportStatus::ImportedKnown(number, who)) => { if let Some(peer) = who { self.update_peer_common_number(&peer, number); - }, + } + }, Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { if aux.clear_justification_requests { trace!( @@ -1387,7 +1392,7 @@ where self.gap_sync = None; } }, - Err(BlockImportError::IncompleteHeader(who)) => + Err(BlockImportError::IncompleteHeader(who)) => { if let Some(peer) = who { warn!( target: "sync", @@ -1395,8 +1400,9 @@ where ); output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); output.extend(self.restart()); - }, - Err(BlockImportError::VerificationFailed(who, e)) => + } + }, + Err(BlockImportError::VerificationFailed(who, e)) => { if let Some(peer) = who { warn!( target: "sync", @@ -1407,8 +1413,9 @@ where ); output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); output.extend(self.restart()); - }, - Err(BlockImportError::BadBlock(who)) => + } + }, + Err(BlockImportError::BadBlock(who)) => { if let Some(peer) = who { warn!( target: "sync", @@ -1417,7 +1424,8 @@ where peer, ); output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); - }, + } + }, Err(BlockImportError::MissingState) => { // This may happen if the chain we were requesting upon has been discarded // in the meantime because other chain has been finalized. @@ -1517,7 +1525,7 @@ where } .boxed(), ); - return + return; } // Check if there is a slot for this block announce validation. 
@@ -1537,7 +1545,7 @@ where } .boxed(), ); - return + return; }, HasSlotForBlockAnnounceValidation::MaximumPeerSlotsReached => { self.block_announce_validation.push(async move { @@ -1550,7 +1558,7 @@ where ); PreValidateBlockAnnounce::Skip }.boxed()); - return + return; }, } @@ -1633,8 +1641,9 @@ where fields: request.fields.to_be_u32(), from_block: match request.from { FromBlock::Hash(h) => Some(schema::v1::block_request::FromBlock::Hash(h.encode())), - FromBlock::Number(n) => - Some(schema::v1::block_request::FromBlock::Number(n.encode())), + FromBlock::Number(n) => { + Some(schema::v1::block_request::FromBlock::Number(n.encode())) + }, }, direction: request.direction as i32, max_blocks: request.max.unwrap_or(0), @@ -1825,15 +1834,18 @@ where fn required_block_attributes(&self) -> BlockAttributes { match self.mode { - SyncMode::Full => - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::Full => { + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY + }, SyncMode::Light => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, - SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, - SyncMode::LightState { storage_chain_mode: true, .. } => - BlockAttributes::HEADER | - BlockAttributes::JUSTIFICATION | - BlockAttributes::INDEXED_BODY, + SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => { + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY + }, + SyncMode::LightState { storage_chain_mode: true, .. } => { + BlockAttributes::HEADER + | BlockAttributes::JUSTIFICATION + | BlockAttributes::INDEXED_BODY + }, } } @@ -1910,7 +1922,7 @@ where for (n, peer) in self.peers.iter_mut() { if let PeerSyncState::AncestorSearch { .. } = peer.state { // Wait for ancestry search to complete first. 
- continue + continue; } let new_common_number = if peer.best_number >= number { number } else { peer.best_number }; @@ -1945,7 +1957,7 @@ where peer: &PeerId, ) -> HasSlotForBlockAnnounceValidation { if self.block_announce_validation.len() >= MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS { - return HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached + return HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached; } match self.block_announce_validation_per_peer_stats.entry(*peer) { @@ -1971,9 +1983,9 @@ where res: &PreValidateBlockAnnounce, ) { let peer = match res { - PreValidateBlockAnnounce::Failure { who, .. } | - PreValidateBlockAnnounce::Process { who, .. } | - PreValidateBlockAnnounce::Error { who } => who, + PreValidateBlockAnnounce::Failure { who, .. } + | PreValidateBlockAnnounce::Process { who, .. } + | PreValidateBlockAnnounce::Error { who } => who, PreValidateBlockAnnounce::Skip => return, }; @@ -2007,16 +2019,17 @@ where who, disconnect, ); - return PollBlockAnnounceValidation::Failure { who, disconnect } + return PollBlockAnnounceValidation::Failure { who, disconnect }; + }, + PreValidateBlockAnnounce::Process { announce, is_new_best, who } => { + (announce, is_new_best, who) }, - PreValidateBlockAnnounce::Process { announce, is_new_best, who } => - (announce, is_new_best, who), PreValidateBlockAnnounce::Error { .. } | PreValidateBlockAnnounce::Skip => { debug!( target: "sync", "Ignored announce validation", ); - return PollBlockAnnounceValidation::Skip + return PollBlockAnnounceValidation::Skip; }, }; @@ -2040,12 +2053,12 @@ where peer } else { error!(target: "sync", "💔 Called on_block_announce with a bad peer ID"); - return PollBlockAnnounceValidation::Nothing { is_best, who, announce } + return PollBlockAnnounceValidation::Nothing { is_best, who, announce }; }; if let PeerSyncState::AncestorSearch { .. 
} = peer.state { trace!(target: "sync", "Peer state is ancestor search."); - return PollBlockAnnounceValidation::Nothing { is_best, who, announce } + return PollBlockAnnounceValidation::Nothing { is_best, who, announce }; } if is_best { @@ -2059,8 +2072,8 @@ where if is_best { if known && self.best_queued_number >= number { self.update_peer_common_number(&who, number); - } else if announce.header.parent_hash() == &self.best_queued_hash || - known_parent && self.best_queued_number >= number + } else if announce.header.parent_hash() == &self.best_queued_hash + || known_parent && self.best_queued_number >= number { self.update_peer_common_number(&who, number - One::one()); } @@ -2073,7 +2086,7 @@ where if let Some(target) = self.fork_targets.get_mut(&hash) { target.peers.insert(who); } - return PollBlockAnnounceValidation::Nothing { is_best, who, announce } + return PollBlockAnnounceValidation::Nothing { is_best, who, announce }; } if ancient_parent { @@ -2084,7 +2097,7 @@ where hash, announce.header, ); - return PollBlockAnnounceValidation::Nothing { is_best, who, announce } + return PollBlockAnnounceValidation::Nothing { is_best, who, announce }; } let requires_additional_data = self.mode != SyncMode::Light || !known_parent; @@ -2096,7 +2109,7 @@ where hash, announce.header, ); - return PollBlockAnnounceValidation::ImportHeader { is_best, announce, who } + return PollBlockAnnounceValidation::ImportHeader { is_best, announce, who }; } if self.status().state == SyncState::Idle { @@ -2140,7 +2153,7 @@ where // We make sure our commmon number is at least something we have. p.common_number = self.best_queued_number; self.peers.insert(id, p); - return None + return None; } // handle peers that were in other states. @@ -2174,9 +2187,9 @@ where self.best_queued_hash = info.best_hash; self.best_queued_number = info.best_number; - if self.mode == SyncMode::Full && - self.client.block_status(&BlockId::hash(info.best_hash))? 
!= - BlockStatus::InChainWithState + if self.mode == SyncMode::Full + && self.client.block_status(&BlockId::hash(info.best_hash))? + != BlockStatus::InChainWithState { self.import_existing = true; // Latest state is missing, start with the last finalized state or genesis instead. @@ -2206,7 +2219,7 @@ where /// What is the status of the block corresponding to the given hash? fn block_status(&self, hash: &B::Hash) -> Result { if self.queue_blocks.contains(hash) { - return Ok(BlockStatus::Queued) + return Ok(BlockStatus::Queued); } self.client.block_status(&BlockId::Hash(*hash)) } @@ -2252,14 +2265,15 @@ where /// Generate block request for downloading of the target block body during warp sync. fn warp_target_block_request(&mut self) -> Option<(PeerId, BlockRequest)> { if let Some(sync) = &self.warp_sync { - if self.allowed_requests.is_empty() || - sync.is_complete() || - self.peers + if self.allowed_requests.is_empty() + || sync.is_complete() + || self + .peers .iter() .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpTargetBlock) { // Only one pending warp target block request is allowed. - return None + return None; } if let Some((target_number, request)) = sync.next_target_block_request() { // Find a random peer that has a block with the target number. @@ -2268,7 +2282,7 @@ where trace!(target: "sync", "New warp target block request for {}", id); peer.state = PeerSyncState::DownloadingWarpTargetBlock; self.allowed_requests.clear(); - return Some((*id, request)) + return Some((*id, request)); } } } @@ -2377,7 +2391,7 @@ fn handle_ancestor_search_state( if block_hash_match && next_distance_to_tip == One::one() { // We found the ancestor in the first step so there is no need to execute binary // search. 
- return None + return None; } if block_hash_match { let left = curr_block_num; @@ -2396,7 +2410,7 @@ fn handle_ancestor_search_state( }, AncestorSearchState::BinarySearch(mut left, mut right) => { if left >= curr_block_num { - return None + return None; } if block_hash_match { left = curr_block_num; @@ -2426,7 +2440,7 @@ fn peer_block_request( ) -> Option<(Range>, BlockRequest)> { if best_num >= peer.best_number { // Will be downloaded as alternative fork instead. - return None + return None; } else if peer.common_number < finalized { trace!( target: "sync", @@ -2507,22 +2521,22 @@ fn fork_sync_request( targets.retain(|hash, r| { if r.number <= finalized { trace!(target: "sync", "Removed expired fork sync request {:?} (#{})", hash, r.number); - return false + return false; } if check_block(hash) != BlockStatus::Unknown { trace!(target: "sync", "Removed obsolete fork sync request {:?} (#{})", hash, r.number); - return false + return false; } true }); for (hash, r) in targets { if !r.peers.contains(&id) { - continue + continue; } // Download the fork only if it is behind or not too far ahead our tip of the chain // Otherwise it should be downloaded in full sync mode. 
- if r.number <= best_num || - (r.number - best_num).saturated_into::() < MAX_BLOCKS_TO_REQUEST as u32 + if r.number <= best_num + || (r.number - best_num).saturated_into::() < MAX_BLOCKS_TO_REQUEST as u32 { let parent_status = r.parent_hash.as_ref().map_or(BlockStatus::Unknown, check_block); let count = if parent_status == BlockStatus::Unknown { @@ -2541,7 +2555,7 @@ fn fork_sync_request( direction: Direction::Descending, max: Some(count), }, - )) + )); } else { trace!(target: "sync", "Fork too far in the future: {:?} (#{})", hash, r.number); } @@ -2560,7 +2574,7 @@ where T: HeaderMetadata + ?Sized, { if base == block { - return Ok(false) + return Ok(false); } let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?; @@ -2587,7 +2601,7 @@ fn validate_blocks( blocks.len(), ); - return Err(BadPeer(*who, rep::NOT_REQUESTED)) + return Err(BadPeer(*who, rep::NOT_REQUESTED)); } let block_header = @@ -2607,11 +2621,11 @@ fn validate_blocks( block_header, ); - return Err(BadPeer(*who, rep::NOT_REQUESTED)) + return Err(BadPeer(*who, rep::NOT_REQUESTED)); } - if request.fields.contains(BlockAttributes::HEADER) && - blocks.iter().any(|b| b.header.is_none()) + if request.fields.contains(BlockAttributes::HEADER) + && blocks.iter().any(|b| b.header.is_none()) { trace!( target: "sync", @@ -2619,7 +2633,7 @@ fn validate_blocks( who, ); - return Err(BadPeer(*who, rep::BAD_RESPONSE)) + return Err(BadPeer(*who, rep::BAD_RESPONSE)); } if request.fields.contains(BlockAttributes::BODY) && blocks.iter().any(|b| b.body.is_none()) @@ -2630,7 +2644,7 @@ fn validate_blocks( who, ); - return Err(BadPeer(*who, rep::BAD_RESPONSE)) + return Err(BadPeer(*who, rep::BAD_RESPONSE)); } } @@ -2645,7 +2659,7 @@ fn validate_blocks( b.hash, hash, ); - return Err(BadPeer(*who, rep::BAD_BLOCK)) + return Err(BadPeer(*who, rep::BAD_BLOCK)); } } if let (Some(header), Some(body)) = (&b.header, &b.body) { @@ -2663,7 +2677,7 @@ fn validate_blocks( expected, got, ); - return Err(BadPeer(*who, 
rep::BAD_BLOCK)) + return Err(BadPeer(*who, rep::BAD_BLOCK)); } } } @@ -2798,9 +2812,9 @@ mod test { // the justification request should be scheduled to the // new peer which is at the given block assert!(sync.justification_requests().any(|(p, r)| { - p == peer_id3 && - r.fields == BlockAttributes::JUSTIFICATION && - r.from == FromBlock::Hash(b1_hash) + p == peer_id3 + && r.fields == BlockAttributes::JUSTIFICATION + && r.from == FromBlock::Hash(b1_hash) })); assert_eq!( @@ -2845,7 +2859,7 @@ mod test { // Poll until we have procssed the block announcement block_on(poll_fn(|cx| loop { if sync.poll_block_announce_validation(cx).is_pending() { - break Poll::Ready(()) + break Poll::Ready(()); } })) } @@ -3226,7 +3240,7 @@ mod test { request } else { // We found the ancenstor - break + break; }; log::trace!(target: "sync", "Request: {:?}", request); @@ -3358,7 +3372,7 @@ mod test { request } else { // We found the ancenstor - break + break; }; log::trace!(target: "sync", "Request: {:?}", request); diff --git a/client/network/sync/src/service/network.rs b/client/network/sync/src/service/network.rs index 44ed177661264..d10517fd0e7c9 100644 --- a/client/network/sync/src/service/network.rs +++ b/client/network/sync/src/service/network.rs @@ -81,10 +81,12 @@ impl NetworkServiceProvider { pub async fn run(mut self, service: Arc) { while let Some(inner) = self.rx.next().await { match inner { - ToServiceCommand::DisconnectPeer(peer, protocol_name) => - service.disconnect_peer(peer, protocol_name), - ToServiceCommand::ReportPeer(peer, reputation_change) => - service.report_peer(peer, reputation_change), + ToServiceCommand::DisconnectPeer(peer, protocol_name) => { + service.disconnect_peer(peer, protocol_name) + }, + ToServiceCommand::ReportPeer(peer, reputation_change) => { + service.report_peer(peer, reputation_change) + }, } } } diff --git a/client/network/sync/src/state.rs b/client/network/sync/src/state.rs index 9f64b52334c8a..35bb92c5c5a45 100644 --- 
a/client/network/sync/src/state.rs +++ b/client/network/sync/src/state.rs @@ -90,11 +90,11 @@ where pub fn import(&mut self, response: StateResponse) -> ImportResult { if response.entries.is_empty() && response.proof.is_empty() { debug!(target: "sync", "Bad state response"); - return ImportResult::BadResponse + return ImportResult::BadResponse; } if !self.skip_proof && response.proof.is_empty() { debug!(target: "sync", "Missing proof"); - return ImportResult::BadResponse + return ImportResult::BadResponse; } let complete = if !self.skip_proof { debug!(target: "sync", "Importing state from {} trie nodes", response.proof.len()); @@ -103,7 +103,7 @@ where Ok(proof) => proof, Err(e) => { debug!(target: "sync", "Error decoding proof: {:?}", e); - return ImportResult::BadResponse + return ImportResult::BadResponse; }, }; let (values, completed) = match self.client.verify_range_proof( @@ -117,7 +117,7 @@ where "StateResponse failed proof verification: {}", e, ); - return ImportResult::BadResponse + return ImportResult::BadResponse; }, Ok(values) => values, }; diff --git a/client/network/sync/src/warp.rs b/client/network/sync/src/warp.rs index ab8a7c66b9856..5a617060f5da2 100644 --- a/client/network/sync/src/warp.rs +++ b/client/network/sync/src/warp.rs @@ -131,7 +131,7 @@ where log::debug!(target: "sync", "Unexpected target block response"); TargetBlockImportResult::BadResponse }, - Phase::TargetBlock(header) => + Phase::TargetBlock(header) => { if let Some(block_header) = &block.header { if block_header == header { if block.body.is_some() { @@ -161,7 +161,8 @@ where } else { log::debug!(target: "sync", "Importing target block failed: missing header."); TargetBlockImportResult::BadResponse - }, + } + }, } } @@ -190,8 +191,8 @@ where Phase::TargetBlock(header) => { let request = BlockRequest:: { id: 0, - fields: BlockAttributes::HEADER | - BlockAttributes::BODY | BlockAttributes::JUSTIFICATION, + fields: BlockAttributes::HEADER + | BlockAttributes::BODY | 
BlockAttributes::JUSTIFICATION, from: FromBlock::Hash(header.hash()), direction: Direction::Ascending, max: Some(1), diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 035fc0a972a59..f7b8762a9a086 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -872,11 +872,12 @@ where let (chain_sync, chain_sync_service) = ChainSync::new( match network_config.sync_mode { SyncMode::Full => sc_network_common::sync::SyncMode::Full, - SyncMode::Fast { skip_proofs, storage_chain_mode } => + SyncMode::Fast { skip_proofs, storage_chain_mode } => { sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode, - }, + } + }, SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, }, client.clone(), @@ -966,10 +967,10 @@ where let mut highest = None; for peer in self.peers().iter() { if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { - return Poll::Pending + return Poll::Pending; } if peer.network.num_sync_requests() != 0 { - return Poll::Pending + return Poll::Pending; } match (highest, peer.client.info().best_hash) { (None, b) => highest = Some(b), @@ -988,10 +989,10 @@ where for peer in self.peers().iter() { if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { - return Poll::Pending + return Poll::Pending; } if peer.network.num_sync_requests() != 0 { - return Poll::Pending + return Poll::Pending; } } Poll::Ready(()) @@ -1005,7 +1006,7 @@ where let num_peers = self.peers().len(); if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) { - return Poll::Ready(()) + return Poll::Ready(()); } Poll::Pending diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 4515677d0b1e0..887cdae42feec 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -48,7 +48,7 @@ fn sync_peers_works() { net.poll(cx); for peer in 0..3 { if net.peer(peer).num_peers() != 2 { - return Poll::Pending + return Poll::Pending; } } 
Poll::Ready(()) @@ -74,12 +74,12 @@ fn sync_cycle_from_offline_to_syncing_to_offline() { for peer in 0..3 { // Online if net.peer(peer).is_offline() { - return Poll::Pending + return Poll::Pending; } if peer < 2 { // Major syncing. if net.peer(peer).blocks_count() < 100 && !net.peer(peer).is_major_syncing() { - return Poll::Pending + return Poll::Pending; } } } @@ -91,7 +91,7 @@ fn sync_cycle_from_offline_to_syncing_to_offline() { net.poll(cx); for peer in 0..3 { if net.peer(peer).is_major_syncing() { - return Poll::Pending + return Poll::Pending; } } Poll::Ready(()) @@ -274,15 +274,15 @@ fn sync_justifications() { net.poll(cx); for hash in [hashof10, hashof15, hashof20] { - if net.peer(0).client().justifications(hash).unwrap() != - Some(Justifications::from((*b"FRNK", Vec::new()))) + if net.peer(0).client().justifications(hash).unwrap() + != Some(Justifications::from((*b"FRNK", Vec::new()))) { - return Poll::Pending + return Poll::Pending; } - if net.peer(1).client().justifications(hash).unwrap() != - Some(Justifications::from((*b"FRNK", Vec::new()))) + if net.peer(1).client().justifications(hash).unwrap() + != Some(Justifications::from((*b"FRNK", Vec::new()))) { - return Poll::Pending + return Poll::Pending; } } @@ -313,10 +313,10 @@ fn sync_justifications_across_forks() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).client().justifications(f1_best).unwrap() == - Some(Justifications::from((*b"FRNK", Vec::new()))) && - net.peer(1).client().justifications(f1_best).unwrap() == - Some(Justifications::from((*b"FRNK", Vec::new()))) + if net.peer(0).client().justifications(f1_best).unwrap() + == Some(Justifications::from((*b"FRNK", Vec::new()))) + && net.peer(1).client().justifications(f1_best).unwrap() + == Some(Justifications::from((*b"FRNK", Vec::new()))) { Poll::Ready(()) } else { @@ -429,7 +429,7 @@ fn can_sync_small_non_best_forks() { assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); if 
net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { - return Poll::Pending + return Poll::Pending; } Poll::Ready(()) })); @@ -440,7 +440,7 @@ fn can_sync_small_non_best_forks() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); if net.peer(1).client().header(&BlockId::Hash(another_fork)).unwrap().is_none() { - return Poll::Pending + return Poll::Pending; } Poll::Ready(()) })); @@ -472,7 +472,7 @@ fn can_sync_forks_ahead_of_the_best_chain() { net.poll(cx); if net.peer(1).client().header(&BlockId::Hash(fork_hash)).unwrap().is_none() { - return Poll::Pending + return Poll::Pending; } Poll::Ready(()) })); @@ -526,7 +526,7 @@ fn can_sync_explicit_forks() { assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { - return Poll::Pending + return Poll::Pending; } Poll::Ready(()) })); @@ -877,9 +877,9 @@ fn block_announce_data_is_propagated() { // Wait until peer 1 is connected to both nodes. 
block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(1).num_peers() == 2 && - net.peer(0).num_peers() == 1 && - net.peer(2).num_peers() == 1 + if net.peer(1).num_peers() == 2 + && net.peer(0).num_peers() == 1 + && net.peer(2).num_peers() == 1 { Poll::Ready(()) } else { @@ -983,10 +983,10 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(1).client().justifications(hashof10).unwrap() != - Some(Justifications::from((*b"FRNK", Vec::new()))) + if net.peer(1).client().justifications(hashof10).unwrap() + != Some(Justifications::from((*b"FRNK", Vec::new()))) { - return Poll::Pending + return Poll::Pending; } Poll::Ready(()) @@ -1010,7 +1010,7 @@ fn syncs_all_forks_from_single_peer() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); if net.peer(1).network().best_seen_block() != Some(12) { - return Poll::Pending + return Poll::Pending; } Poll::Ready(()) })); diff --git a/client/network/transactions/src/lib.rs b/client/network/transactions/src/lib.rs index 5239a94ef23f3..0e6287923bf5c 100644 --- a/client/network/transactions/src/lib.rs +++ b/client/network/transactions/src/lib.rs @@ -102,7 +102,7 @@ impl Future for PendingTransaction { let mut this = self.project(); if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { - return Poll::Ready((this.tx_hash.clone(), import_result)) + return Poll::Ready((this.tx_hash.clone(), import_result)); } Poll::Pending @@ -341,7 +341,7 @@ where Event::NotificationsReceived { remote, messages } => { for (protocol, message) in messages { if protocol != self.protocol_name { - continue + continue; } if let Ok(m) = @@ -364,7 +364,7 @@ where // Accept transactions only when node is not major syncing if self.service.is_major_syncing() { trace!(target: "sync", "{} Ignoring transactions while major syncing", who); - return + return; } trace!(target: "sync", "Received {} 
transactions from {}", transactions.len(), who); @@ -376,7 +376,7 @@ where "Ignoring any further transactions that exceed `MAX_PENDING_TRANSACTIONS`({}) limit", MAX_PENDING_TRANSACTIONS, ); - break + break; } let hash = self.transaction_pool.hash_of(&t); @@ -402,8 +402,9 @@ where fn on_handle_transaction_import(&mut self, who: PeerId, import: TransactionImport) { match import { - TransactionImport::KnownGood => - self.service.report_peer(who, rep::ANY_TRANSACTION_REFUND), + TransactionImport::KnownGood => { + self.service.report_peer(who, rep::ANY_TRANSACTION_REFUND) + }, TransactionImport::NewGood => self.service.report_peer(who, rep::GOOD_TRANSACTION), TransactionImport::Bad => self.service.report_peer(who, rep::BAD_TRANSACTION), TransactionImport::None => {}, @@ -414,7 +415,7 @@ where pub fn propagate_transaction(&mut self, hash: &H) { // Accept transactions only when node is not major syncing if self.service.is_major_syncing() { - return + return; } debug!(target: "sync", "Propagating transaction [{:?}]", hash); @@ -434,7 +435,7 @@ where for (who, peer) in self.peers.iter_mut() { // never send transactions to the light node if matches!(peer.role, ObservedRole::Light) { - continue + continue; } let (hashes, to_send): (Vec<_>, Vec<_>) = transactions @@ -466,7 +467,7 @@ where fn propagate_transactions(&mut self) { // Accept transactions only when node is not major syncing if self.service.is_major_syncing() { - return + return; } debug!(target: "sync", "Propagating transactions"); diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index 7d3dd8302f343..5978a0583b190 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -118,8 +118,9 @@ impl offchain::DbExternalities for Db { "CAS", ); match kind { - StorageKind::PERSISTENT => - self.persistent.compare_and_set(STORAGE_PREFIX, key, old_value, new_value), + StorageKind::PERSISTENT => { + self.persistent.compare_and_set(STORAGE_PREFIX, key, old_value, new_value) + }, 
StorageKind::LOCAL => unavailable_yet(LOCAL_DB), } } diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index 4c97e5a47058d..05b05ca3e9a26 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -156,7 +156,7 @@ impl HttpApi { target: LOG_TARGET, "Overflow in offchain worker HTTP request ID assignment" ); - return Err(()) + return Err(()); }, }; self.requests @@ -219,7 +219,7 @@ impl HttpApi { future::MaybeDone::Done(Err(_)) => return Err(HttpError::IoError), future::MaybeDone::Future(_) | future::MaybeDone::Gone => { debug_assert!(matches!(deadline, future::MaybeDone::Done(..))); - return Err(HttpError::DeadlineReached) + return Err(HttpError::DeadlineReached); }, }; @@ -248,13 +248,13 @@ impl HttpApi { match poll_sender(&mut sender) { Err(HttpError::IoError) => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Encountered io error while trying to add new chunk to body"); - return Err(HttpError::IoError) + return Err(HttpError::IoError); }, other => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, res = ?other, "Added chunk to body"); self.requests .insert(request_id, HttpApiRequest::Dispatched(Some(sender))); - return other + return other; }, } } else { @@ -263,7 +263,7 @@ impl HttpApi { // Writing an empty body is a hint that we should stop writing. Dropping // the sender. 
self.requests.insert(request_id, HttpApiRequest::Dispatched(None)); - return Ok(()) + return Ok(()); } }, @@ -279,13 +279,13 @@ impl HttpApi { ) { Err(HttpError::IoError) => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Encountered io error while trying to add new chunk to body"); - return Err(HttpError::IoError) + return Err(HttpError::IoError); }, other => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, res = ?other, "Added chunk to body"); self.requests .insert(request_id, HttpApiRequest::Response(response)); - return other + return other; }, } } else { @@ -300,7 +300,7 @@ impl HttpApi { ..response }), ); - return Ok(()) + return Ok(()); } }, @@ -309,16 +309,16 @@ impl HttpApi { // If the request has already failed, return without putting back the request // in the list. - return Err(HttpError::IoError) + return Err(HttpError::IoError); }, - v @ HttpApiRequest::Dispatched(None) | - v @ HttpApiRequest::Response(HttpApiRequestRp { sending_body: None, .. }) => { + v @ HttpApiRequest::Dispatched(None) + | v @ HttpApiRequest::Response(HttpApiRequestRp { sending_body: None, .. }) => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Body sending already finished"); // We have already finished sending this body. self.requests.insert(request_id, v); - return Err(HttpError::Invalid) + return Err(HttpError::Invalid); }, } } @@ -335,10 +335,10 @@ impl HttpApi { for id in ids { match self.requests.get_mut(id) { Some(HttpApiRequest::NotDispatched(_, _)) => {}, - Some(HttpApiRequest::Dispatched(sending_body)) | - Some(HttpApiRequest::Response(HttpApiRequestRp { sending_body, .. })) => { + Some(HttpApiRequest::Dispatched(sending_body)) + | Some(HttpApiRequest::Response(HttpApiRequestRp { sending_body, .. 
})) => { let _ = sending_body.take(); - continue + continue; }, _ => continue, }; @@ -403,7 +403,7 @@ impl HttpApi { }, } } - return output + return output; } } @@ -416,7 +416,7 @@ impl HttpApi { msg } else { debug_assert!(matches!(deadline, future::MaybeDone::Done(..))); - continue + continue; } }; @@ -456,7 +456,7 @@ impl HttpApi { None => { tracing::error!(target: "offchain-worker::http", "Worker has crashed"); - return ids.iter().map(|_| HttpRequestStatus::IoError).collect() + return ids.iter().map(|_| HttpRequestStatus::IoError).collect(); }, } } @@ -496,14 +496,14 @@ impl HttpApi { // and we still haven't received a response. Some(rq @ HttpApiRequest::Dispatched(_)) => { self.requests.insert(request_id, rq); - return Err(HttpError::DeadlineReached) + return Err(HttpError::DeadlineReached); }, // The request has failed. Some(HttpApiRequest::Fail { .. }) => return Err(HttpError::IoError), // Request hasn't been dispatched yet; reading the body is invalid. Some(rq @ HttpApiRequest::NotDispatched(_, _)) => { self.requests.insert(request_id, rq); - return Err(HttpError::Invalid) + return Err(HttpError::Invalid); }, None => return Err(HttpError::Invalid), }; @@ -524,12 +524,12 @@ impl HttpApi { ..response }), ); - return Ok(n) + return Ok(n); }, Err(err) => { // This code should never be reached unless there's a logic error somewhere. 
tracing::error!(target: "offchain-worker::http", "Failed to read from current read chunk: {:?}", err); - return Err(HttpError::IoError) + return Err(HttpError::IoError); }, } } @@ -550,7 +550,7 @@ impl HttpApi { if let future::MaybeDone::Done(_) = deadline { self.requests.insert(request_id, HttpApiRequest::Response(response)); - return Err(HttpError::DeadlineReached) + return Err(HttpError::DeadlineReached); } } } @@ -565,8 +565,9 @@ impl fmt::Debug for HttpApi { impl fmt::Debug for HttpApiRequest { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - HttpApiRequest::NotDispatched(_, _) => - f.debug_tuple("HttpApiRequest::NotDispatched").finish(), + HttpApiRequest::NotDispatched(_, _) => { + f.debug_tuple("HttpApiRequest::NotDispatched").finish() + }, HttpApiRequest::Dispatched(_) => f.debug_tuple("HttpApiRequest::Dispatched").finish(), HttpApiRequest::Response(HttpApiRequestRp { status_code, headers, .. }) => f .debug_tuple("HttpApiRequest::Response") @@ -661,12 +662,12 @@ impl Future for HttpWorker { let response = match Future::poll(Pin::new(&mut future), cx) { Poll::Pending => { me.requests.push((id, HttpWorkerRequest::Dispatched(future))); - continue + continue; }, Poll::Ready(Ok(response)) => response, Poll::Ready(Err(error)) => { let _ = me.to_api.unbounded_send(WorkerToApi::Fail { id, error }); - continue // don't insert the request back + continue; // don't insert the request back }, }; @@ -684,7 +685,7 @@ impl Future for HttpWorker { me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx: body_tx })); cx.waker().wake_by_ref(); // reschedule in order to poll the new future - continue + continue; }, HttpWorkerRequest::ReadBody { mut body, mut tx } => { @@ -695,7 +696,7 @@ impl Future for HttpWorker { Poll::Ready(Err(_)) => continue, // don't insert the request back Poll::Pending => { me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); - continue + continue; }, } @@ -744,10 +745,12 @@ impl fmt::Debug for HttpWorker { impl 
fmt::Debug for HttpWorkerRequest { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - HttpWorkerRequest::Dispatched(_) => - f.debug_tuple("HttpWorkerRequest::Dispatched").finish(), - HttpWorkerRequest::ReadBody { .. } => - f.debug_tuple("HttpWorkerRequest::Response").finish(), + HttpWorkerRequest::Dispatched(_) => { + f.debug_tuple("HttpWorkerRequest::Dispatched").finish() + }, + HttpWorkerRequest::ReadBody { .. } => { + f.debug_tuple("HttpWorkerRequest::Response").finish() + }, } } } diff --git a/client/offchain/src/api/timestamp.rs b/client/offchain/src/api/timestamp.rs index 4b3f5efddf275..ae144f392d3ca 100644 --- a/client/offchain/src/api/timestamp.rs +++ b/client/offchain/src/api/timestamp.rs @@ -60,8 +60,9 @@ pub fn deadline_to_future( future::maybe_done(match deadline.map(timestamp_from_now) { None => Either::Left(future::pending()), // Only apply delay if we need to wait a non-zero duration - Some(duration) if duration <= Duration::from_secs(0) => - Either::Right(Either::Left(future::ready(()))), + Some(duration) if duration <= Duration::from_secs(0) => { + Either::Right(Either::Left(future::ready(()))) + }, Some(duration) => Either::Right(Either::Right(futures_timer::Delay::new(duration))), }) } diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index ec09835c4898e..279def1259fc2 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -324,7 +324,7 @@ impl Peerset { fn on_add_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { let newly_inserted = self.reserved_nodes[set_id.0].0.insert(peer_id); if !newly_inserted { - return + return; } self.data.add_no_slot_node(set_id.0, peer_id); @@ -333,14 +333,14 @@ impl Peerset { fn on_remove_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { if !self.reserved_nodes[set_id.0].0.remove(&peer_id) { - return + return; } self.data.remove_no_slot_node(set_id.0, &peer_id); // Nothing more to do if not in reserved-only mode. 
if !self.reserved_nodes[set_id.0].1 { - return + return; } // If, however, the peerset is in reserved-only mode, then the removed node needs to be @@ -384,7 +384,7 @@ impl Peerset { self.data.connected_peers(set_id.0).cloned().collect::>().into_iter() { if self.reserved_nodes[set_id.0].0.contains(&peer_id) { - continue + continue; } let peer = self.data.peer(set_id.0, &peer_id).into_connected().expect( @@ -417,7 +417,7 @@ impl Peerset { fn on_remove_from_peers_set(&mut self, set_id: SetId, peer_id: PeerId) { // Don't do anything if node is reserved. if self.reserved_nodes[set_id.0].0.contains(&peer_id) { - return + return; } match self.data.peer(set_id.0, &peer_id) { @@ -442,7 +442,7 @@ impl Peerset { trace!(target: "peerset", "Report {}: {:+} to {}. Reason: {}", peer_id, change.value, reputation.reputation(), change.reason ); - return + return; } debug!(target: "peerset", "Report {}: {:+} to {}. Reason: {}, Disconnecting", @@ -509,7 +509,7 @@ impl Peerset { peer_reputation.set_reputation(after); if after != 0 { - continue + continue; } drop(peer_reputation); @@ -550,7 +550,7 @@ impl Peerset { // remove that check. If necessary, the peerset should be refactored to give more // control over what happens in that situation. if entry.reputation() < BANNED_THRESHOLD { - break + break; } match entry.try_outgoing() { @@ -574,7 +574,7 @@ impl Peerset { // Nothing more to do if we're in reserved mode. if self.reserved_nodes[set_id.0].1 { - return + return; } // Try to grab the next node to attempt to connect to. @@ -588,7 +588,7 @@ impl Peerset { // Don't connect to nodes with an abysmal reputation. if next.reputation() < BANNED_THRESHOLD { - break + break; } match next.try_outgoing() { @@ -599,7 +599,7 @@ impl Peerset { // This branch can only be entered if there is no free slot, which is // checked above. 
debug_assert!(false); - break + break; }, } } @@ -621,7 +621,7 @@ impl Peerset { if self.reserved_nodes[set_id.0].1 && !self.reserved_nodes[set_id.0].0.contains(&peer_id) { self.message_queue.push_back(Message::Reject(index)); - return + return; } let not_connected = match self.data.peer(set_id.0, &peer_id) { @@ -636,7 +636,7 @@ impl Peerset { if not_connected.reputation() < BANNED_THRESHOLD { self.message_queue.push_back(Message::Reject(index)); - return + return; } match not_connected.try_accept_incoming() { @@ -725,7 +725,7 @@ impl Stream for Peerset { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { loop { if let Some(message) = self.message_queue.pop_front() { - return Poll::Ready(Some(message)) + return Poll::Ready(Some(message)); } if Future::poll(Pin::new(&mut self.next_periodic_alloc_slots), cx).is_ready() { @@ -743,21 +743,28 @@ impl Stream for Peerset { }; match action { - Action::AddReservedPeer(set_id, peer_id) => - self.on_add_reserved_peer(set_id, peer_id), - Action::RemoveReservedPeer(set_id, peer_id) => - self.on_remove_reserved_peer(set_id, peer_id), - Action::SetReservedPeers(set_id, peer_ids) => - self.on_set_reserved_peers(set_id, peer_ids), - Action::SetReservedOnly(set_id, reserved) => - self.on_set_reserved_only(set_id, reserved), + Action::AddReservedPeer(set_id, peer_id) => { + self.on_add_reserved_peer(set_id, peer_id) + }, + Action::RemoveReservedPeer(set_id, peer_id) => { + self.on_remove_reserved_peer(set_id, peer_id) + }, + Action::SetReservedPeers(set_id, peer_ids) => { + self.on_set_reserved_peers(set_id, peer_ids) + }, + Action::SetReservedOnly(set_id, reserved) => { + self.on_set_reserved_only(set_id, reserved) + }, Action::ReportPeer(peer_id, score_diff) => self.on_report_peer(peer_id, score_diff), - Action::AddToPeersSet(sets_name, peer_id) => - self.add_to_peers_set(sets_name, peer_id), - Action::RemoveFromPeersSet(sets_name, peer_id) => - self.on_remove_from_peers_set(sets_name, peer_id), - 
Action::PeerReputation(peer_id, pending_response) => - self.on_peer_reputation(peer_id, pending_response), + Action::AddToPeersSet(sets_name, peer_id) => { + self.add_to_peers_set(sets_name, peer_id) + }, + Action::RemoveFromPeersSet(sets_name, peer_id) => { + self.on_remove_from_peers_set(sets_name, peer_id) + }, + Action::PeerReputation(peer_id, pending_response) => { + self.on_peer_reputation(peer_id, pending_response) + }, } } } diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs index c9af5b8e2ccd0..e40364bd5dc0b 100644 --- a/client/peerset/src/peersstate.rs +++ b/client/peerset/src/peersstate.rs @@ -191,10 +191,12 @@ impl PeersState { assert!(set < self.sets.len()); match self.nodes.get_mut(peer_id).map(|p| &p.sets[set]) { - None | Some(MembershipState::NotMember) => - Peer::Unknown(UnknownPeer { parent: self, set, peer_id: Cow::Borrowed(peer_id) }), - Some(MembershipState::In) | Some(MembershipState::Out) => - Peer::Connected(ConnectedPeer { state: self, set, peer_id: Cow::Borrowed(peer_id) }), + None | Some(MembershipState::NotMember) => { + Peer::Unknown(UnknownPeer { parent: self, set, peer_id: Cow::Borrowed(peer_id) }) + }, + Some(MembershipState::In) | Some(MembershipState::Out) => { + Peer::Connected(ConnectedPeer { state: self, set, peer_id: Cow::Borrowed(peer_id) }) + }, Some(MembershipState::NotConnected { .. 
}) => Peer::NotConnected(NotConnectedPeer { state: self, set, @@ -247,7 +249,7 @@ impl PeersState { .fold(None::<(&PeerId, &mut Node)>, |mut cur_node, to_try| { if let Some(cur_node) = cur_node.take() { if cur_node.1.reputation >= to_try.1.reputation { - return Some(cur_node) + return Some(cur_node); } } Some(to_try) @@ -272,7 +274,7 @@ impl PeersState { pub fn add_no_slot_node(&mut self, set: usize, peer_id: PeerId) { // Reminder: `HashSet::insert` returns false if the node was already in the set if !self.sets[set].no_slot_nodes.insert(peer_id) { - return + return; } if let Some(peer) = self.nodes.get_mut(&peer_id) { @@ -290,7 +292,7 @@ impl PeersState { pub fn remove_no_slot_node(&mut self, set: usize, peer_id: &PeerId) { // Reminder: `HashSet::remove` returns false if the node was already not in the set if !self.sets[set].no_slot_nodes.remove(peer_id) { - return + return; } if let Some(peer) = self.nodes.get_mut(peer_id) { @@ -447,7 +449,7 @@ impl<'a> NotConnectedPeer<'a> { "State inconsistency with {}; not connected after borrow", self.peer_id ); - return Instant::now() + return Instant::now(); }, }; @@ -472,7 +474,7 @@ impl<'a> NotConnectedPeer<'a> { // Note that it is possible for num_out to be strictly superior to the max, in case we were // connected to reserved node then marked them as not reserved. if !self.state.has_free_outgoing_slot(self.set) && !is_no_slot_occupy { - return Err(self) + return Err(self); } if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { @@ -498,10 +500,10 @@ impl<'a> NotConnectedPeer<'a> { // Note that it is possible for num_in to be strictly superior to the max, in case we were // connected to reserved node then marked them as not reserved. 
- if self.state.sets[self.set].num_in >= self.state.sets[self.set].max_in && - !is_no_slot_occupy + if self.state.sets[self.set].num_in >= self.state.sets[self.set].max_in + && !is_no_slot_occupy { - return Err(self) + return Err(self); } if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { @@ -542,8 +544,8 @@ impl<'a> NotConnectedPeer<'a> { peer.sets[self.set] = MembershipState::NotMember; // Remove the peer from `self.state.nodes` entirely if it isn't a member of any set. - if peer.reputation == 0 && - peer.sets.iter().all(|set| matches!(set, MembershipState::NotMember)) + if peer.reputation == 0 + && peer.sets.iter().all(|set| matches!(set, MembershipState::NotMember)) { self.state.nodes.remove(&*self.peer_id); } @@ -614,8 +616,8 @@ impl<'a> Reputation<'a> { impl<'a> Drop for Reputation<'a> { fn drop(&mut self) { if let Some(node) = self.node.take() { - if node.get().reputation == 0 && - node.get().sets.iter().all(|set| matches!(set, MembershipState::NotMember)) + if node.get().reputation == 0 + && node.get().sets.iter().all(|set| matches!(set, MembershipState::NotMember)) { node.remove(); } diff --git a/client/peerset/tests/fuzz.rs b/client/peerset/tests/fuzz.rs index 48c5cb341c35a..712822f68cf9e 100644 --- a/client/peerset/tests/fuzz.rs +++ b/client/peerset/tests/fuzz.rs @@ -119,26 +119,28 @@ fn test_once() { }, // If we generate 2, adjust a random reputation. - 2 => + 2 => { if let Some(id) = known_nodes.iter().choose(&mut rng) { let val = Uniform::new_inclusive(i32::MIN, i32::MAX).sample(&mut rng); peerset_handle.report_peer(*id, ReputationChange::new(val, "")); - }, + } + }, // If we generate 3, disconnect from a random node. - 3 => + 3 => { if let Some(id) = connected_nodes.iter().choose(&mut rng).cloned() { connected_nodes.remove(&id); peerset.dropped(SetId::from(0), id, DropReason::Unknown); - }, + } + }, // If we generate 4, connect to a random node. 
4 => { if let Some(id) = known_nodes .iter() .filter(|n| { - incoming_nodes.values().all(|m| m != *n) && - !connected_nodes.contains(*n) + incoming_nodes.values().all(|m| m != *n) + && !connected_nodes.contains(*n) }) .choose(&mut rng) { @@ -161,11 +163,12 @@ fn test_once() { reserved_nodes.insert(*id); } }, - 8 => + 8 => { if let Some(id) = reserved_nodes.iter().choose(&mut rng).cloned() { reserved_nodes.remove(&id); peerset_handle.remove_reserved_peer(SetId::from(0), id); - }, + } + }, _ => unreachable!(), } diff --git a/client/rpc-api/src/chain/error.rs b/client/rpc-api/src/chain/error.rs index 670e221cf1cde..30a7ad4e34c5b 100644 --- a/client/rpc-api/src/chain/error.rs +++ b/client/rpc-api/src/chain/error.rs @@ -42,8 +42,9 @@ const BASE_ERROR: i32 = 3000; impl From for JsonRpseeError { fn from(e: Error) -> Self { match e { - Error::Other(message) => - CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, message, None::<()>)).into(), + Error::Other(message) => { + CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, message, None::<()>)).into() + }, e => e.into(), } } diff --git a/client/rpc-api/src/dev/error.rs b/client/rpc-api/src/dev/error.rs index 43fd3325fa598..37ea2a94a9b71 100644 --- a/client/rpc-api/src/dev/error.rs +++ b/client/rpc-api/src/dev/error.rs @@ -51,14 +51,18 @@ impl From for JsonRpseeError { let msg = e.to_string(); match e { - Error::BlockQueryError(_) => - CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, msg, None::<()>)), - Error::BlockExecutionFailed => - CallError::Custom(ErrorObject::owned(BASE_ERROR + 3, msg, None::<()>)), - Error::WitnessCompactionFailed => - CallError::Custom(ErrorObject::owned(BASE_ERROR + 4, msg, None::<()>)), - Error::ProofExtractionFailed => - CallError::Custom(ErrorObject::owned(BASE_ERROR + 5, msg, None::<()>)), + Error::BlockQueryError(_) => { + CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, msg, None::<()>)) + }, + Error::BlockExecutionFailed => { + CallError::Custom(ErrorObject::owned(BASE_ERROR + 
3, msg, None::<()>)) + }, + Error::WitnessCompactionFailed => { + CallError::Custom(ErrorObject::owned(BASE_ERROR + 4, msg, None::<()>)) + }, + Error::ProofExtractionFailed => { + CallError::Custom(ErrorObject::owned(BASE_ERROR + 5, msg, None::<()>)) + }, Error::UnsafeRpcCalled(e) => e.into(), } .into() diff --git a/client/rpc-api/src/state/error.rs b/client/rpc-api/src/state/error.rs index b1df64b4789ab..0a6b6bc889d26 100644 --- a/client/rpc-api/src/state/error.rs +++ b/client/rpc-api/src/state/error.rs @@ -60,12 +60,14 @@ const BASE_ERROR: i32 = 4000; impl From for JsonRpseeError { fn from(e: Error) -> Self { match e { - Error::InvalidBlockRange { .. } => + Error::InvalidBlockRange { .. } => { CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, e.to_string(), None::<()>)) - .into(), - Error::InvalidCount { .. } => + .into() + }, + Error::InvalidCount { .. } => { CallError::Custom(ErrorObject::owned(BASE_ERROR + 2, e.to_string(), None::<()>)) - .into(), + .into() + }, e => Self::to_call_error(e), } } diff --git a/client/rpc-api/src/system/error.rs b/client/rpc-api/src/system/error.rs index 777f8c6c6df0b..bfefc348f4117 100644 --- a/client/rpc-api/src/system/error.rs +++ b/client/rpc-api/src/system/error.rs @@ -48,8 +48,9 @@ const MALFORMATTED_PEER_ARG_ERROR: i32 = BASE_ERROR + 2; impl From for JsonRpseeError { fn from(e: Error) -> Self { match e { - Error::NotHealthy(ref h) => - CallError::Custom(ErrorObject::owned(NOT_HEALTHY_ERROR, e.to_string(), Some(h))), + Error::NotHealthy(ref h) => { + CallError::Custom(ErrorObject::owned(NOT_HEALTHY_ERROR, e.to_string(), Some(h))) + }, Error::MalformattedPeerArg(e) => CallError::Custom(ErrorObject::owned( MALFORMATTED_PEER_ARG_ERROR + 2, e, diff --git a/client/rpc-spec-v2/src/transaction/error.rs b/client/rpc-spec-v2/src/transaction/error.rs index 72a5959992f9e..ba7a02928decf 100644 --- a/client/rpc-spec-v2/src/transaction/error.rs +++ b/client/rpc-spec-v2/src/transaction/error.rs @@ -41,10 +41,11 @@ impl From for 
TransactionEvent { Error::Verification(e) => TransactionEvent::Invalid(TransactionError { error: format!("Verification error: {}", e), }), - Error::Pool(PoolError::InvalidTransaction(InvalidTransaction::Custom(e))) => + Error::Pool(PoolError::InvalidTransaction(InvalidTransaction::Custom(e))) => { TransactionEvent::Invalid(TransactionError { error: format!("Invalid transaction with custom error: {}", e), - }), + }) + }, Error::Pool(PoolError::InvalidTransaction(e)) => { let msg: &str = e.into(); TransactionEvent::Invalid(TransactionError { @@ -57,28 +58,32 @@ impl From for TransactionEvent { error: format!("Unknown transaction validity: {}", msg), }) }, - Error::Pool(PoolError::TemporarilyBanned) => + Error::Pool(PoolError::TemporarilyBanned) => { TransactionEvent::Invalid(TransactionError { error: "Transaction is temporarily banned".into(), - }), - Error::Pool(PoolError::AlreadyImported(_)) => + }) + }, + Error::Pool(PoolError::AlreadyImported(_)) => { TransactionEvent::Invalid(TransactionError { error: "Transaction is already imported".into(), - }), - Error::Pool(PoolError::TooLowPriority { old, new }) => + }) + }, + Error::Pool(PoolError::TooLowPriority { old, new }) => { TransactionEvent::Invalid(TransactionError { error: format!( "The priority of the transactin is too low (pool {} > current {})", old, new ), - }), + }) + }, Error::Pool(PoolError::CycleDetected) => TransactionEvent::Invalid(TransactionError { error: "The transaction contains a cyclic dependency".into(), }), - Error::Pool(PoolError::ImmediatelyDropped) => + Error::Pool(PoolError::ImmediatelyDropped) => { TransactionEvent::Invalid(TransactionError { error: "The transaction could not enter the pool because of the limit".into(), - }), + }) + }, Error::Pool(PoolError::Unactionable) => TransactionEvent::Invalid(TransactionError { error: "Transaction cannot be propagated and the local node does not author blocks" .into(), @@ -87,14 +92,16 @@ impl From for TransactionEvent { error: "Transaction does 
not provide any tags, so the pool cannot identify it" .into(), }), - Error::Pool(PoolError::InvalidBlockId(_)) => + Error::Pool(PoolError::InvalidBlockId(_)) => { TransactionEvent::Invalid(TransactionError { error: "The provided block ID is not valid".into(), - }), - Error::Pool(PoolError::RejectedFutureTransaction) => + }) + }, + Error::Pool(PoolError::RejectedFutureTransaction) => { TransactionEvent::Invalid(TransactionError { error: "The pool is not accepting future transactions".into(), - }), + }) + }, } } } diff --git a/client/rpc-spec-v2/src/transaction/event.rs b/client/rpc-spec-v2/src/transaction/event.rs index 3c75eaff10fd4..7a9710aaea79a 100644 --- a/client/rpc-spec-v2/src/transaction/event.rs +++ b/client/rpc-spec-v2/src/transaction/event.rs @@ -186,20 +186,27 @@ enum TransactionEventIR { impl From> for TransactionEventIR { fn from(value: TransactionEvent) -> Self { match value { - TransactionEvent::Validated => - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Validated), - TransactionEvent::Broadcasted(event) => - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Broadcasted(event)), - TransactionEvent::BestChainBlockIncluded(event) => - TransactionEventIR::Block(TransactionEventBlockIR::BestChainBlockIncluded(event)), - TransactionEvent::Finalized(event) => - TransactionEventIR::Block(TransactionEventBlockIR::Finalized(event)), - TransactionEvent::Error(event) => - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Error(event)), - TransactionEvent::Invalid(event) => - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Invalid(event)), - TransactionEvent::Dropped(event) => - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Dropped(event)), + TransactionEvent::Validated => { + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Validated) + }, + TransactionEvent::Broadcasted(event) => { + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Broadcasted(event)) + }, + 
TransactionEvent::BestChainBlockIncluded(event) => { + TransactionEventIR::Block(TransactionEventBlockIR::BestChainBlockIncluded(event)) + }, + TransactionEvent::Finalized(event) => { + TransactionEventIR::Block(TransactionEventBlockIR::Finalized(event)) + }, + TransactionEvent::Error(event) => { + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Error(event)) + }, + TransactionEvent::Invalid(event) => { + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Invalid(event)) + }, + TransactionEvent::Dropped(event) => { + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Dropped(event)) + }, } } } @@ -209,16 +216,18 @@ impl From> for TransactionEvent { match value { TransactionEventIR::NonBlock(status) => match status { TransactionEventNonBlockIR::Validated => TransactionEvent::Validated, - TransactionEventNonBlockIR::Broadcasted(event) => - TransactionEvent::Broadcasted(event), + TransactionEventNonBlockIR::Broadcasted(event) => { + TransactionEvent::Broadcasted(event) + }, TransactionEventNonBlockIR::Error(event) => TransactionEvent::Error(event), TransactionEventNonBlockIR::Invalid(event) => TransactionEvent::Invalid(event), TransactionEventNonBlockIR::Dropped(event) => TransactionEvent::Dropped(event), }, TransactionEventIR::Block(block) => match block { TransactionEventBlockIR::Finalized(event) => TransactionEvent::Finalized(event), - TransactionEventBlockIR::BestChainBlockIncluded(event) => - TransactionEvent::BestChainBlockIncluded(event), + TransactionEventBlockIR::BestChainBlockIncluded(event) => { + TransactionEvent::BestChainBlockIncluded(event) + }, }, } } diff --git a/client/rpc-spec-v2/src/transaction/transaction.rs b/client/rpc-spec-v2/src/transaction/transaction.rs index e2cf736dff17a..ffafc8626b64d 100644 --- a/client/rpc-spec-v2/src/transaction/transaction.rs +++ b/client/rpc-spec-v2/src/transaction/transaction.rs @@ -102,7 +102,7 @@ where None::<()>, )); let _ = sink.reject(err); - return Ok(()) + return Ok(()); }, }; @@ -170,8 
+170,9 @@ impl TransactionState { event: TransactionStatus, ) -> Option> { match event { - TransactionStatus::Ready | TransactionStatus::Future => - Some(TransactionEvent::::Validated), + TransactionStatus::Ready | TransactionStatus::Future => { + Some(TransactionEvent::::Validated) + }, TransactionStatus::Broadcast(peers) => { // Set the broadcasted flag once if we submitted the transaction to // at least one peer. @@ -181,19 +182,22 @@ impl TransactionState { num_peers: peers.len(), })) }, - TransactionStatus::InBlock((hash, index)) => + TransactionStatus::InBlock((hash, index)) => { Some(TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { hash, index, - }))), + }))) + }, TransactionStatus::Retracted(_) => Some(TransactionEvent::BestChainBlockIncluded(None)), - TransactionStatus::FinalityTimeout(_) => + TransactionStatus::FinalityTimeout(_) => { Some(TransactionEvent::Dropped(TransactionDropped { broadcasted: self.broadcasted, error: "Maximum number of finality watchers has been reached".into(), - })), - TransactionStatus::Finalized((hash, index)) => - Some(TransactionEvent::Finalized(TransactionBlock { hash, index })), + })) + }, + TransactionStatus::Finalized((hash, index)) => { + Some(TransactionEvent::Finalized(TransactionBlock { hash, index })) + }, TransactionStatus::Usurped(_) => Some(TransactionEvent::Invalid(TransactionError { error: "Extrinsic was rendered invalid by another extrinsic".into(), })), diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index 7d0ffdc62e080..acdff901e6de0 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -183,7 +183,7 @@ where Ok(dxt) => dxt, Err(e) => { let _ = sink.reject(JsonRpseeError::from(e)); - return Ok(()) + return Ok(()); }, }; @@ -201,7 +201,7 @@ where Ok(stream) => stream, Err(err) => { let _ = sink.reject(JsonRpseeError::from(err)); - return + return; }, }; diff --git a/client/rpc/src/dev/mod.rs b/client/rpc/src/dev/mod.rs index 
7f4b68f56f6f6..e9273a1b206a2 100644 --- a/client/rpc/src/dev/mod.rs +++ b/client/rpc/src/dev/mod.rs @@ -80,7 +80,7 @@ where header.digest_mut().logs.retain(|item| !matches!(item, DigestItem::Seal(_, _))); Block::new(header, body) } else { - return Ok(None) + return Ok(None); } }; let parent_header = { @@ -92,7 +92,7 @@ where if let Some(header) = parent_header { header } else { - return Ok(None) + return Ok(None); } }; let block_len = block.encoded_size() as u64; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 7213e4360ae2b..679c6d48ddc29 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -240,7 +240,7 @@ where return Err(JsonRpseeError::from(Error::InvalidCount { value: count, max: STORAGE_KEYS_PAGED_MAX_COUNT, - })) + })); } self.backend .storage_keys_paged(block, prefix, count, start_key) @@ -332,7 +332,7 @@ where if keys.is_none() { if let Err(err) = self.deny_unsafe.check_if_safe() { let _ = sink.reject(JsonRpseeError::from(err)); - return Ok(()) + return Ok(()); } } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 64b6cacaad700..0b6b485e71c0a 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -105,7 +105,7 @@ where &from_meta, &to_meta, "from number > to number".to_owned(), - )) + )); } // check if we can get from `to` to `from` by going through parent_hashes. 
@@ -126,7 +126,7 @@ where &from_meta, &to_meta, "from and to are on different forks".to_owned(), - )) + )); } hashes.reverse(); hashes @@ -365,7 +365,7 @@ where Ok(initial) => initial, Err(e) => { let _ = sink.reject(JsonRpseeError::from(e)); - return + return; }, }; @@ -403,7 +403,7 @@ where Ok(stream) => stream, Err(blockchain_err) => { let _ = sink.reject(JsonRpseeError::from(Error::Client(Box::new(blockchain_err)))); - return + return; }, }; @@ -488,8 +488,9 @@ where self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - ChildInfo::new_default(storage_key), + Some((ChildType::ParentKeyId, storage_key)) => { + ChildInfo::new_default(storage_key) + }, None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client @@ -513,8 +514,9 @@ where self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - ChildInfo::new_default(storage_key), + Some((ChildType::ParentKeyId, storage_key)) => { + ChildInfo::new_default(storage_key) + }, None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_keys(block, &child_info, &prefix) @@ -533,8 +535,9 @@ where self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - ChildInfo::new_default(storage_key), + Some((ChildType::ParentKeyId, storage_key)) => { + ChildInfo::new_default(storage_key) + }, None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_keys_iter( @@ -557,8 +560,9 @@ where self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - ChildInfo::new_default(storage_key), + Some((ChildType::ParentKeyId, 
storage_key)) => { + ChildInfo::new_default(storage_key) + }, None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage(block, &child_info, &key) @@ -577,7 +581,7 @@ where { Arc::new(ChildInfo::new_default(storage_key)) } else { - return Err(client_err(sp_blockchain::Error::InvalidChildStorageKey)) + return Err(client_err(sp_blockchain::Error::InvalidChildStorageKey)); }; let block = self.block_or_best(block).map_err(client_err)?; let client = self.client.clone(); @@ -598,8 +602,9 @@ where self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - ChildInfo::new_default(storage_key), + Some((ChildType::ParentKeyId, storage_key)) => { + ChildInfo::new_default(storage_key) + }, None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_hash(block, &child_info, &key) diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 2f91648008ff7..dc7be4f2b35f1 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -101,15 +101,17 @@ fn api>>(sync: T) -> RpcModule> { Request::NetworkAddReservedPeer(peer, sender) => { let _ = match sc_network_common::config::parse_str_addr(&peer) { Ok(_) => sender.send(Ok(())), - Err(s) => - sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), + Err(s) => { + sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))) + }, }; }, Request::NetworkRemoveReservedPeer(peer, sender) => { let _ = match peer.parse::() { Ok(_) => sender.send(Ok(())), - Err(s) => - sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), + Err(s) => { + sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))) + }, }; }, Request::NetworkReservedPeers(sender) => { @@ -368,7 +370,7 @@ fn test_add_reset_log_filter() { }; futures::executor::block_on(fut).expect("`system_resetLogFilter` failed"); } else if 
line.contains("exit") { - return + return; } log::trace!(target: "test_before_add", "{}", EXPECTED_WITH_TRACE); log::debug!(target: "test_before_add", "{}", EXPECTED_BEFORE_ADD); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 3cb064ec814c5..37e3865e20987 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -117,8 +117,9 @@ impl KeystoreContainer { /// Construct KeystoreContainer pub fn new(config: &KeystoreConfig) -> Result { let keystore = Arc::new(match config { - KeystoreConfig::Path { path, password } => - LocalKeystore::open(path.clone(), password.clone())?, + KeystoreConfig::Path { path, password } => { + LocalKeystore::open(path.clone(), password.clone())? + }, KeystoreConfig::InMemory => LocalKeystore::in_memory(), }); @@ -772,7 +773,7 @@ where let mut request_response_protocol_configs = Vec::new(); if warp_sync.is_none() && config.network.sync_mode.is_warp() { - return Err("Warp sync enabled, but no warp sync provider configured.".into()) + return Err("Warp sync enabled, but no warp sync provider configured.".into()); } if client.requires_full_sync() { @@ -797,8 +798,8 @@ where &protocol_id, config.chain_spec.fork_id(), client.clone(), - config.network.default_peers_set.in_peers as usize + - config.network.default_peers_set.out_peers as usize, + config.network.default_peers_set.in_peers as usize + + config.network.default_peers_set.out_peers as usize, ); spawn_handle.spawn("block-request-handler", Some("networking"), handler.run()); protocol_config @@ -849,8 +850,9 @@ where let (chain_sync, chain_sync_service) = ChainSync::new( match config.network.sync_mode { SyncMode::Full => sc_network_common::sync::SyncMode::Full, - SyncMode::Fast { skip_proofs, storage_chain_mode } => - sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode }, + SyncMode::Fast { skip_proofs, storage_chain_mode } => { + sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode 
} + }, SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, }, client.clone(), @@ -979,7 +981,7 @@ where ); // This `return` might seem unnecessary, but we don't want to make it look like // everything is working as normal even though the user is clearly misusing the API. - return + return; } future.await diff --git a/client/service/src/chain_ops/export_blocks.rs b/client/service/src/chain_ops/export_blocks.rs index d442a11f2c39b..ec7b6950f1d35 100644 --- a/client/service/src/chain_ops/export_blocks.rs +++ b/client/service/src/chain_ops/export_blocks.rs @@ -61,7 +61,7 @@ where let client = &client; if last < block { - return Poll::Ready(Err("Invalid block range specified".into())) + return Poll::Ready(Err("Invalid block range specified".into())); } if !wrote_header { @@ -76,13 +76,14 @@ where } match client.block(&BlockId::number(block))? { - Some(block) => + Some(block) => { if binary { output.write_all(&block.encode())?; } else { serde_json::to_writer(&mut output, &block) .map_err(|e| format!("Error writing JSON: {}", e))?; - }, + } + }, // Reached end of the chain. None => return Poll::Ready(Ok(())), } @@ -90,7 +91,7 @@ where info!("#{}", block); } if block == last { - return Poll::Ready(Ok(())) + return Poll::Ready(Ok(())); } block += One::one(); diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs index c0612124dd0c2..ea4ed1e9d2c7d 100644 --- a/client/service/src/chain_ops/import_blocks.rs +++ b/client/service/src/chain_ops/import_blocks.rs @@ -102,8 +102,8 @@ where /// Returns the number of blocks read thus far. fn read_block_count(&self) -> u64 { match self { - BlockIter::Binary { read_block_count, .. } | - BlockIter::Json { read_block_count, .. } => *read_block_count, + BlockIter::Binary { read_block_count, .. } + | BlockIter::Json { read_block_count, .. 
} => *read_block_count, } } @@ -227,8 +227,8 @@ impl Speedometer { let speed = diff .saturating_mul(10_000) .checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) / - 10.0; + .map_or(0.0, |s| s as f64) + / 10.0; info!("📦 Current best block: {} ({:4.1} bps)", self.best_number, speed); } else { // If the number of blocks can't be converted to a regular integer, then we need a more @@ -324,7 +324,7 @@ where if let (Err(err), hash) = result { warn!("There was an error importing block with hash {:?}: {}", hash, err); self.has_error = true; - break + break; } } } @@ -338,7 +338,7 @@ where Err(e) => { // We've encountered an error while creating the block iterator // so we can just return a future that returns an error. - return future::ready(Err(Error::Other(e))).boxed() + return future::ready(Err(Error::Other(e))).boxed(); }, }; @@ -388,11 +388,12 @@ where state = Some(ImportState::Reading { block_iter }); } }, - Err(e) => + Err(e) => { return Poll::Ready(Err(Error::Other(format!( "Error reading block #{}: {}", read_block_count, e - )))), + )))) + }, } }, } @@ -408,7 +409,7 @@ where delay, block, }); - return Poll::Pending + return Poll::Pending; }, Poll::Ready(_) => { delay.reset(Duration::from_millis(DELAY_TIME)); @@ -440,7 +441,7 @@ where read_block_count, client.info().best_number ); - return Poll::Ready(Ok(())) + return Poll::Ready(Ok(())); } else { // Importing is not done, we still have to wait for the queue to finish. // Wait for the delay, because we know the queue is lagging behind. 
@@ -451,7 +452,7 @@ where read_block_count, delay, }); - return Poll::Pending + return Poll::Pending; }, Poll::Ready(_) => { delay.reset(Duration::from_millis(DELAY_TIME)); @@ -476,7 +477,7 @@ where return Poll::Ready(Err(Error::Other(format!( "Stopping after #{} blocks because of an error", link.imported_blocks - )))) + )))); } cx.waker().wake_by_ref(); diff --git a/client/service/src/client/block_rules.rs b/client/service/src/client/block_rules.rs index 2ed27b8fe1b63..1fa4999d59525 100644 --- a/client/service/src/client/block_rules.rs +++ b/client/service/src/client/block_rules.rs @@ -61,12 +61,12 @@ impl BlockRules { pub fn lookup(&self, number: NumberFor, hash: &B::Hash) -> LookupResult { if let Some(hash_for_height) = self.forks.get(&number) { if hash_for_height != hash { - return LookupResult::Expected(*hash_for_height) + return LookupResult::Expected(*hash_for_height); } } if self.bad.contains(hash) { - return LookupResult::KnownBad + return LookupResult::KnownBad; } LookupResult::NotSpecial diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 1d896d8acd8bf..c56745d48114d 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -466,7 +466,7 @@ where } = import_block; if !intermediates.is_empty() { - return Err(Error::IncompletePipeline) + return Err(Error::IncompletePipeline); } let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?; @@ -548,8 +548,8 @@ where { let parent_hash = *import_headers.post().parent_hash(); let status = self.backend.blockchain().status(BlockId::Hash(hash))?; - let parent_exists = self.backend.blockchain().status(BlockId::Hash(parent_hash))? == - blockchain::BlockStatus::InChain; + let parent_exists = self.backend.blockchain().status(BlockId::Hash(parent_hash))? 
+ == blockchain::BlockStatus::InChain; match (import_existing, status) { (false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), (false, blockchain::BlockStatus::Unknown) => {}, @@ -566,19 +566,20 @@ where // the block is lower than our last finalized block so it must revert // finality, refusing import. - if status == blockchain::BlockStatus::Unknown && - *import_headers.post().number() <= info.finalized_number && - !gap_block + if status == blockchain::BlockStatus::Unknown + && *import_headers.post().number() <= info.finalized_number + && !gap_block { - return Err(sp_blockchain::Error::NotInFinalizedChain) + return Err(sp_blockchain::Error::NotInFinalizedChain); } // this is a fairly arbitrary choice of where to draw the line on making notifications, // but the general goal is to only make notifications when we are already fully synced // and get a new chain head. let make_notifications = match origin { - BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => - true, + BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => { + true + }, BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, }; @@ -612,12 +613,14 @@ where let storage_key = PrefixedStorageKey::new_ref(&parent_storage); let storage_key = match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - storage_key, - None => + Some((ChildType::ParentKeyId, storage_key)) => { + storage_key + }, + None => { return Err(Error::Backend( "Invalid child storage key.".to_string(), - )), + )) + }, }; let entry = storage .children_default @@ -642,7 +645,7 @@ where // State root mismatch when importing state. This should not happen in // safe fast sync mode, but may happen in unsafe mode. 
warn!("Error importing state: State root mismatch."); - return Err(Error::InvalidStateRoot) + return Err(Error::InvalidStateRoot); } None }, @@ -666,11 +669,12 @@ where )?; } - let is_new_best = !gap_block && - (finalized || - match fork_choice { - ForkChoiceStrategy::LongestChain => - import_headers.post().number() > &info.best_number, + let is_new_best = !gap_block + && (finalized + || match fork_choice { + ForkChoiceStrategy::LongestChain => { + import_headers.post().number() > &info.best_number + }, ForkChoiceStrategy::Custom(v) => v, }); @@ -780,18 +784,21 @@ where let at = BlockId::Hash(*parent_hash); let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); let (enact_state, storage_changes) = match (self.block_status(&at)?, state_action) { - (BlockStatus::KnownBad, _) => - return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), + (BlockStatus::KnownBad, _) => { + return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)) + }, ( BlockStatus::InChainPruned, StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)), ) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (_, StateAction::ApplyChanges(changes)) => (true, Some(changes)), - (BlockStatus::Unknown, _) => - return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), + (BlockStatus::Unknown, _) => { + return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)) + }, (_, StateAction::Skip) => (false, None), - (BlockStatus::InChainPruned, StateAction::Execute) => - return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), + (BlockStatus::InChainPruned, StateAction::Execute) => { + return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)) + }, (BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None), (_, StateAction::Execute) => (true, None), (_, StateAction::ExecuteIfPossible) => (true, None), @@ -820,7 +827,7 @@ 
where if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root { - return Err(Error::InvalidStateRoot) + return Err(Error::InvalidStateRoot); } Some(sc_consensus::StorageChanges::Changes(gen_storage_changes)) }, @@ -849,7 +856,7 @@ where "Possible safety violation: attempted to re-finalize last finalized block {:?} ", last_finalized ); - return Ok(()) + return Ok(()); } let route_from_finalized = @@ -862,7 +869,7 @@ where retracted, last_finalized ); - return Err(sp_blockchain::Error::NotInFinalizedChain) + return Err(sp_blockchain::Error::NotInFinalizedChain); } let route_from_best = @@ -931,7 +938,7 @@ where // since we won't be running the loop below which // would also remove any closed sinks. sinks.retain(|sink| !sink.is_closed()); - return Ok(()) + return Ok(()); }, }; @@ -963,7 +970,7 @@ where // temporary leak of closed/discarded notification sinks (e.g. // from consensus code). self.import_notification_sinks.lock().retain(|sink| !sink.is_closed()); - return Ok(()) + return Ok(()); }, }; @@ -1024,7 +1031,7 @@ where // this can probably be implemented more efficiently if let BlockId::Hash(ref h) = id { if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { - return Ok(BlockStatus::Queued) + return Ok(BlockStatus::Queued); } } let hash_and_number = match *id { @@ -1032,12 +1039,13 @@ where BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), }; match hash_and_number { - Some((hash, number)) => + Some((hash, number)) => { if self.backend.have_state_at(hash, number) { Ok(BlockStatus::InChainWithState) } else { Ok(BlockStatus::InChainPruned) - }, + } + }, None => Ok(BlockStatus::Unknown), } } @@ -1073,7 +1081,7 @@ where let genesis_hash = self.backend.blockchain().info().genesis_hash; if genesis_hash == target_hash { - return Ok(Vec::new()) + return Ok(Vec::new()); } let mut current_hash = target_hash; @@ -1089,7 +1097,7 @@ where current_hash = ancestor_hash; if genesis_hash == 
current_hash { - break + break; } current = ancestor; @@ -1203,14 +1211,15 @@ where size_limit: usize, ) -> sp_blockchain::Result> { if start_key.len() > MAX_NESTED_TRIE_DEPTH { - return Err(Error::Backend("Invalid start key.".to_string())) + return Err(Error::Backend("Invalid start key.".to_string())); } let state = self.state_at(hash)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(storage_key); match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - Ok(ChildInfo::new_default(storage_key)), + Some((ChildType::ParentKeyId, storage_key)) => { + Ok(ChildInfo::new_default(storage_key)) + }, None => Err(Error::Backend("Invalid child storage key.".to_string())), } }; @@ -1222,7 +1231,7 @@ where { Some((child_info(start_key)?, child_root)) } else { - return Err(Error::Backend("Invalid root start key.".to_string())) + return Err(Error::Backend("Invalid root start key.".to_string())); } } else { None @@ -1266,18 +1275,18 @@ where let size = value.len() + next_key.len(); if total_size + size > size_limit && !entries.is_empty() { complete = false; - break + break; } total_size += size; - if current_child.is_none() && - sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) && - !child_roots.contains(value.as_slice()) + if current_child.is_none() + && sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) + && !child_roots.contains(value.as_slice()) { child_roots.insert(value.clone()); switch_child_key = Some((next_key.clone(), value.clone())); entries.push((next_key.clone(), value)); - break + break; } entries.push((next_key.clone(), value)); current_key = next_key; @@ -1297,12 +1306,12 @@ where complete, )); if !complete { - break + break; } } else { result[0].0.key_values.extend(entries.into_iter()); result[0].1 = complete; - break + break; } } Ok(result) @@ -1759,7 +1768,7 @@ where match self.block_rules.lookup(number, 
&hash) { BlockLookupResult::KnownBad => { trace!("Rejecting known bad block: #{} {:?}", number, hash); - return Ok(ImportResult::KnownBad) + return Ok(ImportResult::KnownBad); }, BlockLookupResult::Expected(expected_hash) => { trace!( @@ -1768,7 +1777,7 @@ where expected_hash, number ); - return Ok(ImportResult::KnownBad) + return Ok(ImportResult::KnownBad); }, BlockLookupResult::NotSpecial => {}, } @@ -1779,10 +1788,12 @@ where .block_status(&BlockId::Hash(hash)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? { - BlockStatus::InChainWithState | BlockStatus::Queued => - return Ok(ImportResult::AlreadyInChain), - BlockStatus::InChainPruned if !import_existing => - return Ok(ImportResult::AlreadyInChain), + BlockStatus::InChainWithState | BlockStatus::Queued => { + return Ok(ImportResult::AlreadyInChain) + }, + BlockStatus::InChainPruned if !import_existing => { + return Ok(ImportResult::AlreadyInChain) + }, BlockStatus::InChainPruned => {}, BlockStatus::Unknown => {}, BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), @@ -1949,8 +1960,9 @@ where Some(header) => { let hash = header.hash(); match (self.body(hash)?, self.justifications(hash)?) 
{ - (Some(extrinsics), justifications) => - Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), + (Some(extrinsics), justifications) => { + Some(SignedBlock { block: Block::new(header, extrinsics), justifications }) + }, _ => None, } }, diff --git a/client/service/src/client/wasm_override.rs b/client/service/src/client/wasm_override.rs index 5fc748f3e88b9..9691ef051378d 100644 --- a/client/service/src/client/wasm_override.rs +++ b/client/service/src/client/wasm_override.rs @@ -178,7 +178,7 @@ impl WasmOverride { }; if !dir.is_dir() { - return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()) + return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()); } let mut overrides = HashMap::new(); @@ -214,7 +214,7 @@ impl WasmOverride { } if !duplicates.is_empty() { - return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()) + return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()); } Ok(overrides) diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 091b4bbe9fe5f..f593af659d756 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -383,8 +383,9 @@ where match tokio::task::block_in_place(|| { config.tokio_handle.block_on(futures::future::try_join(http_fut, ws_fut)) }) { - Ok((http, ws)) => - Ok(Box::new((waiting::HttpServer(Some(http)), waiting::WsServer(Some(ws))))), + Ok((http, ws)) => { + Ok(Box::new((waiting::HttpServer(Some(http)), waiting::WsServer(Some(ws))))) + }, Err(e) => Err(Error::Application(e)), } } @@ -444,7 +445,7 @@ where Ok(uxt) => uxt, Err(e) => { debug!("Transaction invalid: {:?}", e); - return Box::pin(futures::future::ready(TransactionImport::Bad)) + return Box::pin(futures::future::ready(TransactionImport::Bad)); }, }; @@ -459,8 +460,9 @@ where match import_future.await { Ok(_) => TransactionImport::NewGood, Err(e) => match e.into_pool_error() { - Ok(sc_transaction_pool_api::error::Error::AlreadyImported(_)) => - TransactionImport::KnownGood, + 
Ok(sc_transaction_pool_api::error::Error::AlreadyImported(_)) => { + TransactionImport::KnownGood + }, Ok(e) => { debug!("Error adding transaction to the pool: {:?}", e); TransactionImport::Bad diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 5d29d34a3cbf2..72743fc2c43e1 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -175,7 +175,7 @@ where .iter() .all(|&(ref id, ref service, _, _)| full_predicate(*id, service)) { - break + break; } } }; diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 01a198a1b3c1e..cde4b8d6d9f34 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -189,8 +189,9 @@ impl fmt::Debug for StateDbError { Self::TooManySiblingBlocks => write!(f, "Too many sibling blocks inserted"), Self::BlockAlreadyExists => write!(f, "Block already exists"), Self::Metadata(message) => write!(f, "Invalid metadata: {}", message), - Self::BlockUnavailable => - write!(f, "Trying to get a block record from db while it is not commit to db yet"), + Self::BlockUnavailable => { + write!(f, "Trying to get a block record from db while it is not commit to db yet") + }, Self::BlockMissing => write!(f, "Block record is missing from the pruning window"), } } @@ -307,8 +308,9 @@ impl let non_canonical: NonCanonicalOverlay = NonCanonicalOverlay::new(&db)?; let pruning: Option> = match mode { PruningMode::Constrained(Constraints { max_mem: Some(_), .. }) => unimplemented!(), - PruningMode::Constrained(Constraints { max_blocks, .. }) => - Some(RefWindow::new(db, max_blocks.unwrap_or(0), ref_counting)?), + PruningMode::Constrained(Constraints { max_blocks, .. }) => { + Some(RefWindow::new(db, max_blocks.unwrap_or(0), ref_counting)?) 
+ }, PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None, }; @@ -342,7 +344,7 @@ impl // the database atomically to keep their consistency when restarting the node let mut commit = CommitSet::default(); if self.mode == PruningMode::ArchiveAll { - return Ok(commit) + return Ok(commit); } let number = self.non_canonical.canonicalize(hash, &mut commit)?; if self.mode == PruningMode::ArchiveCanonical { @@ -389,21 +391,22 @@ impl { loop { if pruning.window_size() <= constraints.max_blocks.unwrap_or(0) as u64 { - break + break; } if constraints.max_mem.map_or(false, |m| pruning.mem_used() > m) { - break + break; } let pinned = &self.pinned; match pruning.next_hash() { // the block record is temporary unavailable, break and try next time Err(Error::StateDb(StateDbError::BlockUnavailable)) => break, - res => + res => { if res?.map_or(false, |h| pinned.contains_key(&h)) { - break - }, + break; + } + }, } match pruning.prune_one(commit) { // this branch should not reach as previous `next_hash` don't return error @@ -422,16 +425,18 @@ impl fn revert_one(&mut self) -> Option> { match self.mode { PruningMode::ArchiveAll => Some(CommitSet::default()), - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => - self.non_canonical.revert_one(), + PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { + self.non_canonical.revert_one() + }, } } fn remove(&mut self, hash: &BlockHash) -> Option> { match self.mode { PruningMode::ArchiveAll => Some(CommitSet::default()), - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => - self.non_canonical.remove(hash), + PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { + self.non_canonical.remove(hash) + }, } } @@ -442,8 +447,8 @@ impl match self.mode { PruningMode::ArchiveAll => Ok(()), PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - let have_block = self.non_canonical.have_block(hash) || - self.pruning.as_ref().map_or(false, |pruning| { + let have_block = 
self.non_canonical.have_block(hash) + || self.pruning.as_ref().map_or(false, |pruning| { match pruning.have_block(hash, number) { HaveBlock::No => false, HaveBlock::Yes => true, @@ -492,7 +497,7 @@ impl Q: std::hash::Hash + Eq, { if let Some(value) = self.non_canonical.get(key) { - return Ok(Some(value)) + return Ok(Some(value)); } db.get(key.as_ref()).map_err(Error::Db) } @@ -530,11 +535,12 @@ impl requested_mode.unwrap_or_default() }, - (false, None, _) => + (false, None, _) => { return Err(StateDbError::Metadata( "An existing StateDb does not have PRUNING_MODE stored in its meta-data".into(), ) - .into()), + .into()) + }, (false, Some(stored), None) => stored, @@ -623,12 +629,12 @@ impl /// Returns last finalized block number. pub fn best_canonical(&self) -> Option { - return self.db.read().best_canonical() + return self.db.read().best_canonical(); } /// Check if block is pruned away. pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> IsPruned { - return self.db.read().is_pruned(hash, number) + return self.db.read().is_pruned(hash, number); } /// Reset in-memory changes to the last disk-backed state. 
@@ -678,10 +684,12 @@ fn choose_pruning_mode( ) -> Result { match (stored, requested) { (PruningMode::ArchiveAll, PruningMode::ArchiveAll) => Ok(PruningMode::ArchiveAll), - (PruningMode::ArchiveCanonical, PruningMode::ArchiveCanonical) => - Ok(PruningMode::ArchiveCanonical), - (PruningMode::Constrained(_), PruningMode::Constrained(requested)) => - Ok(PruningMode::Constrained(requested)), + (PruningMode::ArchiveCanonical, PruningMode::ArchiveCanonical) => { + Ok(PruningMode::ArchiveCanonical) + }, + (PruningMode::Constrained(_), PruningMode::Constrained(requested)) => { + Ok(PruningMode::Constrained(requested)) + }, (stored, requested) => Err(StateDbError::IncompatiblePruningModes { requested, stored }), } } diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 3711cf7a42667..6d35ab8bf0dc1 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -138,8 +138,8 @@ fn discard_descendants( while let Some(i) = level.blocks.iter().position(|overlay| { parents .get(&overlay.hash) - .expect("there is a parent entry for each entry in levels; qed") == - hash + .expect("there is a parent entry for each entry in levels; qed") + == hash }) { let overlay = level.remove(i); let mut num_pinned = discard_descendants( @@ -214,7 +214,7 @@ impl NonCanonicalOverlay { } } if level.blocks.is_empty() { - break + break; } levels.push_back(level); block += 1; @@ -258,7 +258,7 @@ impl NonCanonicalOverlay { front_block_number, front_block_number + self.levels.len() as u64, ); - return Err(StateDbError::InvalidBlockNumber) + return Err(StateDbError::InvalidBlockNumber); } // check for valid parent if inserting on second level or higher if number == front_block_number { @@ -267,14 +267,14 @@ impl NonCanonicalOverlay { .as_ref() .map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) { - return Err(StateDbError::InvalidParent) + return Err(StateDbError::InvalidParent); } } else if 
!self.parents.contains_key(parent_hash) { - return Err(StateDbError::InvalidParent) + return Err(StateDbError::InvalidParent); } } - let level = if self.levels.is_empty() || - number == front_block_number + self.levels.len() as u64 + let level = if self.levels.is_empty() + || number == front_block_number + self.levels.len() as u64 { self.levels.push_back(OverlayLevel::new()); self.levels.back_mut().expect("can't be empty after insertion; qed") @@ -284,10 +284,10 @@ impl NonCanonicalOverlay { }; if level.blocks.len() >= MAX_BLOCKS_PER_LEVEL as usize { - return Err(StateDbError::TooManySiblingBlocks) + return Err(StateDbError::TooManySiblingBlocks); } if level.blocks.iter().any(|b| b.hash == *hash) { - return Err(StateDbError::BlockAlreadyExists) + return Err(StateDbError::BlockAlreadyExists); } let index = level.available_index(); @@ -472,13 +472,13 @@ impl NonCanonicalOverlay { // Check that it does not have any children if (level_index != level_count - 1) && self.parents.values().any(|h| h == hash) { log::debug!(target: "state-db", "Trying to remove block {:?} with children", hash); - return None + return None; } let overlay = level.remove(index); commit.meta.deleted.push(overlay.journal_key); self.parents.remove(&overlay.hash); discard_values(&mut self.values, overlay.inserted); - break + break; } if self.levels.back().map_or(false, |l| l.blocks.is_empty()) { self.levels.pop_back(); @@ -548,8 +548,8 @@ mod tests { use sp_core::H256; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(&H256::from_low_u64_be(key)) == - Some(H256::from_low_u64_be(key).as_bytes().to_vec()) + overlay.get(&H256::from_low_u64_be(key)) + == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) } #[test] diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 458522b8119fd..9f5a2609b8f2c 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -358,7 +358,7 @@ impl RefWindow { // if the queue is empty or the 
block number exceed the pruning window, we definitely // do not have this block if self.is_empty() || number < self.base || number >= self.base + self.window_size() { - return HaveBlock::No + return HaveBlock::No; } self.queue.have_block(hash, (number - self.base) as usize) } @@ -390,7 +390,7 @@ impl RefWindow { // assume that parent was canonicalized self.base = number; } else if (self.base + self.window_size()) != number { - return Err(Error::StateDb(StateDbError::InvalidBlockNumber)) + return Err(Error::StateDb(StateDbError::InvalidBlockNumber)); } trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); let inserted = if matches!(self.queue, DeathRowQueue::Mem { .. }) { diff --git a/client/sysinfo/src/sysinfo.rs b/client/sysinfo/src/sysinfo.rs index c66a6f6a62aed..0ea22341c9af3 100644 --- a/client/sysinfo/src/sysinfo.rs +++ b/client/sysinfo/src/sysinfo.rs @@ -132,7 +132,7 @@ where S: Serializer, { if let Some(throughput) = maybe_throughput { - return serializer.serialize_some(&(throughput.as_mibs() as u64)) + return serializer.serialize_some(&(throughput.as_mibs() as u64)); } serializer.serialize_none() } @@ -159,7 +159,7 @@ pub(crate) fn benchmark( elapsed = timestamp.elapsed(); if elapsed >= max_duration { - break + break; } } @@ -567,16 +567,16 @@ mod tests { #[test] fn test_benchmark_disk_sequential_writes() { assert!( - benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() > - Throughput::from_mibs(0.0) + benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() + > Throughput::from_mibs(0.0) ); } #[test] fn test_benchmark_disk_random_writes() { assert!( - benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() > - Throughput::from_mibs(0.0) + benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() + > Throughput::from_mibs(0.0) ); } diff --git 
a/client/telemetry/src/endpoints.rs b/client/telemetry/src/endpoints.rs index fba3822a90676..fa4e8edbaf898 100644 --- a/client/telemetry/src/endpoints.rs +++ b/client/telemetry/src/endpoints.rs @@ -65,7 +65,7 @@ fn url_to_multiaddr(url: &str) -> Result { // If not, try the `ws://path/url` format. if let Ok(ma) = libp2p::multiaddr::from_url(url) { - return Ok(ma) + return Ok(ma); } // If we have no clue about the format of that string, assume that we were expecting a diff --git a/client/telemetry/src/lib.rs b/client/telemetry/src/lib.rs index 503a326f76c2b..5d859bc5f4417 100644 --- a/client/telemetry/src/lib.rs +++ b/client/telemetry/src/lib.rs @@ -263,7 +263,7 @@ impl TelemetryWorker { "Could not initialise transport: {}", err, ); - continue + continue; }, }; entry.insert(Node::new(transport, addr.clone(), Vec::new(), Vec::new())) @@ -328,12 +328,12 @@ impl TelemetryWorker { message, )), ); - return + return; }; for (node_max_verbosity, addr) in nodes { if verbosity > *node_max_verbosity { - continue + continue; } if let Some(node) = node_pool.get_mut(addr) { diff --git a/client/telemetry/src/node.rs b/client/telemetry/src/node.rs index 0d71a363a1b26..43cccdf916f36 100644 --- a/client/telemetry/src/node.rs +++ b/client/telemetry/src/node.rs @@ -124,7 +124,7 @@ where ) -> Poll> { while let Some(item) = conn.buf.pop() { if let Err(e) = conn.sink.start_send_unpin(item) { - return Poll::Ready(Err(e)) + return Poll::Ready(Err(e)); } futures::ready!(conn.sink.poll_ready_unpin(cx))?; } @@ -157,11 +157,11 @@ where }, Poll::Ready(Ok(())) => { self.socket = NodeSocket::Connected(conn); - return Poll::Ready(Ok(())) + return Poll::Ready(Ok(())); }, Poll::Pending => { self.socket = NodeSocket::Connected(conn); - return Poll::Pending + return Poll::Pending; }, } }, @@ -171,7 +171,7 @@ where }, Poll::Pending => { self.socket = NodeSocket::Connected(conn); - return Poll::Pending + return Poll::Pending; }, }, NodeSocket::Dialing(mut s) => match Future::poll(Pin::new(&mut s), cx) { 
@@ -187,7 +187,7 @@ where log::debug!(target: "telemetry", "Failed to send a telemetry connection notification: {}", error); } else { self.telemetry_connection_notifier.swap_remove(index); - continue + continue; } } index += 1; @@ -244,12 +244,12 @@ where if Future::poll(Pin::new(&mut s), cx).is_ready() { socket = NodeSocket::ReconnectNow; } else { - break NodeSocket::WaitingReconnect(s) + break NodeSocket::WaitingReconnect(s); } }, NodeSocket::Poisoned => { log::error!(target: "telemetry", "‼️ Poisoned connection with {}", self.addr); - break NodeSocket::Poisoned + break NodeSocket::Poisoned; }, } }; diff --git a/client/telemetry/src/transport.rs b/client/telemetry/src/transport.rs index d64da44a83b6b..cdfd79732ee1f 100644 --- a/client/telemetry/src/transport.rs +++ b/client/telemetry/src/transport.rs @@ -112,7 +112,7 @@ impl StreamSink { log::error!(target: "telemetry", "Detected some internal buffering happening in the telemetry"); let err = io::Error::new(io::ErrorKind::Other, "Internal buffering detected"); - return Poll::Ready(Err(err)) + return Poll::Ready(Err(err)); } } diff --git a/client/tracing/proc-macro/src/lib.rs b/client/tracing/proc-macro/src/lib.rs index ba757619fb5a0..39d0e16e991a2 100644 --- a/client/tracing/proc-macro/src/lib.rs +++ b/client/tracing/proc-macro/src/lib.rs @@ -113,7 +113,7 @@ pub fn prefix_logs_with(arg: TokenStream, item: TokenStream) -> TokenStream { "missing argument: name of the node. 
Example: sc_cli::prefix_logs_with()", ) .to_compile_error() - .into() + .into(); } let name = syn::parse_macro_input!(arg as Expr); diff --git a/client/tracing/src/block/mod.rs b/client/tracing/src/block/mod.rs index 63fd1de374cba..1833c81b38c6d 100644 --- a/client/tracing/src/block/mod.rs +++ b/client/tracing/src/block/mod.rs @@ -109,11 +109,11 @@ impl BlockSubscriber { impl Subscriber for BlockSubscriber { fn enabled(&self, metadata: &tracing::Metadata<'_>) -> bool { if !metadata.is_span() && metadata.fields().field(REQUIRED_EVENT_FIELD).is_none() { - return false + return false; } for (target, level) in &self.targets { if metadata.level() <= level && metadata.target().starts_with(target) { - return true + return true; } } false @@ -255,7 +255,7 @@ where return Err(Error::Dispatch(format!( "Failed to collect traces and execute block: {}", e - ))) + ))); } } @@ -339,7 +339,7 @@ fn patch_and_filter(mut span: SpanDatum, targets: &str) -> Option { span.target = t; } if !check_target(targets, &span.target, &span.level) { - return None + return None; } } Some(span.into()) @@ -349,7 +349,7 @@ fn patch_and_filter(mut span: SpanDatum, targets: &str) -> Option { fn check_target(targets: &str, target: &str, level: &Level) -> bool { for (t, l) in targets.split(',').map(crate::parse_target) { if target.starts_with(t.as_str()) && level <= &l { - return true + return true; } } false diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index 1ae695a725f3f..89aa2219f52aa 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -141,10 +141,10 @@ impl Values { /// Checks if all individual collections are empty pub fn is_empty(&self) -> bool { - self.bool_values.is_empty() && - self.i64_values.is_empty() && - self.u64_values.is_empty() && - self.string_values.is_empty() + self.bool_values.is_empty() + && self.i64_values.is_empty() + && self.u64_values.is_empty() + && self.string_values.is_empty() } } @@ -175,10 +175,10 @@ impl Serialize for Values { 
where S: Serializer, { - let len = self.bool_values.len() + - self.i64_values.len() + - self.u64_values.len() + - self.string_values.len(); + let len = self.bool_values.len() + + self.i64_values.len() + + self.u64_values.len() + + self.string_values.len(); let mut map = serializer.serialize_map(Some(len))?; for (k, v) in &self.bool_values { map.serialize_entry(k, v)?; @@ -250,7 +250,7 @@ impl ProfilingLayer { fn check_target(&self, target: &str, level: &Level) -> bool { for t in &self.targets { if target.starts_with(t.0.as_str()) && level <= &t.1 { - return true + return true; } } false @@ -627,7 +627,7 @@ mod tests { tracing::event!(target: "test_target", tracing::Level::INFO, "test_event1"); for msg in rx.recv() { if !msg { - break + break; } } // guard2 and span2 dropped / exited diff --git a/client/tracing/src/logging/event_format.rs b/client/tracing/src/logging/event_format.rs index aec6b76843daf..ab90dec6bf6b1 100644 --- a/client/tracing/src/logging/event_format.rs +++ b/client/tracing/src/logging/event_format.rs @@ -95,7 +95,7 @@ where let exts = span.extensions(); if let Some(prefix) = exts.get::() { write!(writer, "{}", prefix.as_str())?; - break + break; } } } @@ -130,10 +130,10 @@ where writer: &mut dyn fmt::Write, event: &Event, ) -> fmt::Result { - if self.dup_to_stdout && - (event.metadata().level() == &Level::INFO || - event.metadata().level() == &Level::WARN || - event.metadata().level() == &Level::ERROR) + if self.dup_to_stdout + && (event.metadata().level() == &Level::INFO + || event.metadata().level() == &Level::WARN + || event.metadata().level() == &Level::ERROR) { let mut out = String::new(); self.format_event_custom(CustomFmtContext::FmtContext(ctx), &mut out, event)?; @@ -276,8 +276,9 @@ where ) -> fmt::Result { match self { CustomFmtContext::FmtContext(fmt_ctx) => fmt_ctx.format_fields(writer, fields), - CustomFmtContext::ContextWithFormatFields(_ctx, fmt_fields) => - fmt_fields.format_fields(writer, fields), + 
CustomFmtContext::ContextWithFormatFields(_ctx, fmt_fields) => { + fmt_fields.format_fields(writer, fields) + }, } } } diff --git a/client/tracing/src/logging/layers/prefix_layer.rs b/client/tracing/src/logging/layers/prefix_layer.rs index 836ffd2adda8e..470ffd61f02c8 100644 --- a/client/tracing/src/logging/layers/prefix_layer.rs +++ b/client/tracing/src/logging/layers/prefix_layer.rs @@ -42,12 +42,12 @@ where "newly created span with ID {:?} did not exist in the registry; this is a bug!", id ); - return + return; }, }; if span.name() != PREFIX_LOG_SPAN { - return + return; } let mut extensions = span.extensions_mut(); diff --git a/client/tracing/src/logging/stderr_writer.rs b/client/tracing/src/logging/stderr_writer.rs index de78a61af41a2..555d0343cdb74 100644 --- a/client/tracing/src/logging/stderr_writer.rs +++ b/client/tracing/src/logging/stderr_writer.rs @@ -105,9 +105,9 @@ fn log_autoflush_thread() { buffer = BUFFER.lock(); if buffer.len() >= ASYNC_FLUSH_THRESHOLD { // While we were busy flushing we picked up enough logs to do another flush. 
- continue + continue; } else { - break + break; } } } diff --git a/client/transaction-pool/src/enactment_state.rs b/client/transaction-pool/src/enactment_state.rs index 6aac98641cf85..5ce325176211e 100644 --- a/client/transaction-pool/src/enactment_state.rs +++ b/client/transaction-pool/src/enactment_state.rs @@ -87,7 +87,7 @@ where // block was already finalized if self.recent_finalized_block == new_hash { log::debug!(target: "txpool", "handle_enactment: block already finalized"); - return Ok(None) + return Ok(None); } // compute actual tree route from best_block to notified block, and use @@ -109,7 +109,7 @@ where "Recently finalized block {} would be retracted by ChainEvent {}, skipping", self.recent_finalized_block, new_hash ); - return Ok(None) + return Ok(None); } if finalized { @@ -124,7 +124,7 @@ where target: "txpool", "handle_enactment: no newly enacted blocks since recent best block" ); - return Ok(None) + return Ok(None); } // otherwise enacted finalized block becomes best block... 
diff --git a/client/transaction-pool/src/graph/base_pool.rs b/client/transaction-pool/src/graph/base_pool.rs index 8e0422739cc63..6596b1e6da416 100644 --- a/client/transaction-pool/src/graph/base_pool.rs +++ b/client/transaction-pool/src/graph/base_pool.rs @@ -268,7 +268,7 @@ impl BasePool) -> error::Result> { if self.is_imported(&tx.hash) { - return Err(error::Error::AlreadyImported(Box::new(tx.hash))) + return Err(error::Error::AlreadyImported(Box::new(tx.hash))); } let tx = WaitingTransaction::new(tx, self.ready.provided_tags(), &self.recently_pruned); @@ -283,12 +283,12 @@ impl BasePool BasePool + Err(e) => { if first { debug!(target: "txpool", "[{:?}] Error importing: {:?}", current_hash, e); - return Err(e) + return Err(e); } else { failed.push(current_hash); - }, + } + }, } first = false; } @@ -348,7 +349,7 @@ impl BasePool BasePool worst, - Ordering::Equal => + Ordering::Equal => { if worst.insertion_id > transaction.insertion_id { transaction.clone() } else { worst - }, + } + }, Ordering::Greater => transaction.clone(), } }) @@ -420,7 +422,7 @@ impl BasePool BasePool WaitingTransaction { .filter(|tag| { // is true if the tag is already satisfied either via transaction in the pool // or one that was recently included. 
- let is_provided = provided.contains_key(&**tag) || - recently_pruned.iter().any(|x| x.contains(&**tag)); + let is_provided = provided.contains_key(&**tag) + || recently_pruned.iter().any(|x| x.contains(&**tag)); !is_provided }) .cloned() diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 7b3a8db15982a..480f006dfe4a9 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -399,7 +399,7 @@ impl Pool { let ignore_banned = matches!(check, CheckBannedBeforeVerify::No); if let Err(err) = self.validated_pool.check_is_known(&hash, ignore_banned) { - return (hash, ValidatedTransaction::Invalid(hash, err)) + return (hash, ValidatedTransaction::Invalid(hash, err)); } let validation_result = self @@ -414,7 +414,7 @@ impl Pool { }; let validity = match status { - Ok(validity) => + Ok(validity) => { if validity.provides.is_empty() { ValidatedTransaction::Invalid(hash, error::Error::NoTagsProvided.into()) } else { @@ -426,11 +426,14 @@ impl Pool { bytes, validity, ) - }, - Err(TransactionValidityError::Invalid(e)) => - ValidatedTransaction::Invalid(hash, error::Error::InvalidTransaction(e).into()), - Err(TransactionValidityError::Unknown(e)) => - ValidatedTransaction::Unknown(hash, error::Error::UnknownTransaction(e).into()), + } + }, + Err(TransactionValidityError::Invalid(e)) => { + ValidatedTransaction::Invalid(hash, error::Error::InvalidTransaction(e).into()) + }, + Err(TransactionValidityError::Unknown(e)) => { + ValidatedTransaction::Unknown(hash, error::Error::UnknownTransaction(e).into()) + }, }; (hash, validity) diff --git a/client/transaction-pool/src/graph/ready.rs b/client/transaction-pool/src/graph/ready.rs index 220e69b13e7eb..b27022c46a342 100644 --- a/client/transaction-pool/src/graph/ready.rs +++ b/client/transaction-pool/src/graph/ready.rs @@ -431,7 +431,7 @@ impl ReadyTransactions { // early exit if we are not replacing anything. 
if replace_hashes.is_empty() { - return Ok((vec![], vec![])) + return Ok((vec![], vec![])); } // now check if collective priority is lower than the replacement transaction. @@ -447,7 +447,7 @@ impl ReadyTransactions { // bail - the transaction has too low priority to replace the old ones if old_priority >= tx.priority { - return Err(error::Error::TooLowPriority { old: old_priority, new: tx.priority }) + return Err(error::Error::TooLowPriority { old: old_priority, new: tx.priority }); } // construct a list of unlocked transactions @@ -548,7 +548,7 @@ impl Iterator for BestIterator { "[{:?}] Skipping invalid child transaction while iterating.", hash ); - continue + continue; } let ready = match self.all.get(hash).cloned() { @@ -574,7 +574,7 @@ impl Iterator for BestIterator { } } - return Some(best.transaction) + return Some(best.transaction); } } } @@ -773,18 +773,18 @@ mod tests { }; // higher priority = better assert!( - TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 1 } > - TransactionRef { transaction: Arc::new(with_priority(2, 3)), insertion_id: 2 } + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 1 } + > TransactionRef { transaction: Arc::new(with_priority(2, 3)), insertion_id: 2 } ); // lower validity = better assert!( - TransactionRef { transaction: Arc::new(with_priority(3, 2)), insertion_id: 1 } > - TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } + TransactionRef { transaction: Arc::new(with_priority(3, 2)), insertion_id: 1 } + > TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } ); // lower insertion_id = better assert!( - TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 1 } > - TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 1 } + > TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } 
); } diff --git a/client/transaction-pool/src/graph/rotator.rs b/client/transaction-pool/src/graph/rotator.rs index 47e00a1292155..b0ce60b06357e 100644 --- a/client/transaction-pool/src/graph/rotator.rs +++ b/client/transaction-pool/src/graph/rotator.rs @@ -88,7 +88,7 @@ impl PoolRotator { xt: &Transaction, ) -> bool { if xt.valid_till > current_block { - return false + return false; } self.ban(now, iter::once(xt.hash.clone())); diff --git a/client/transaction-pool/src/graph/validated_pool.rs b/client/transaction-pool/src/graph/validated_pool.rs index dcb8195073733..b77d3a9a7f0c3 100644 --- a/client/transaction-pool/src/graph/validated_pool.rs +++ b/client/transaction-pool/src/graph/validated_pool.rs @@ -185,8 +185,9 @@ impl ValidatedPool { results .into_iter() .map(|res| match res { - Ok(ref hash) if removed.contains(hash) => - Err(error::Error::ImmediatelyDropped.into()), + Ok(ref hash) if removed.contains(hash) => { + Err(error::Error::ImmediatelyDropped.into()) + }, other => other, }) .collect() @@ -197,7 +198,7 @@ impl ValidatedPool { match tx { ValidatedTransaction::Valid(tx) => { if !tx.propagate && !(self.is_validator.0)() { - return Err(error::Error::Unactionable.into()) + return Err(error::Error::Unactionable.into()); } let imported = self.pool.write().import(tx)?; @@ -206,7 +207,7 @@ impl ValidatedPool { let sinks = &mut self.import_notification_sinks.lock(); sinks.retain_mut(|sink| match sink.try_send(*hash) { Ok(()) => true, - Err(e) => + Err(e) => { if e.is_full() { log::warn!( target: "txpool", @@ -216,7 +217,8 @@ impl ValidatedPool { true } else { false - }, + } + }, }); } @@ -241,8 +243,8 @@ impl ValidatedPool { let future_limit = &self.options.future; log::debug!(target: "txpool", "Pool Status: {:?}", status); - if ready_limit.is_exceeded(status.ready, status.ready_bytes) || - future_limit.is_exceeded(status.future, status.future_bytes) + if ready_limit.is_exceeded(status.ready, status.ready_bytes) + || future_limit.is_exceeded(status.future, 
status.future_bytes) { log::debug!( target: "txpool", @@ -403,8 +405,8 @@ impl ValidatedPool { final_statuses.insert(hash, Status::Failed); }, }, - ValidatedTransaction::Invalid(_, _) | - ValidatedTransaction::Unknown(_, _) => { + ValidatedTransaction::Invalid(_, _) + | ValidatedTransaction::Unknown(_, _) => { final_statuses.insert(hash, Status::Failed); }, } @@ -602,7 +604,7 @@ impl ValidatedPool { pub fn remove_invalid(&self, hashes: &[ExtrinsicHash]) -> Vec> { // early exit in case there is no invalid transactions. if hashes.is_empty() { - return vec![] + return vec![]; } log::debug!(target: "txpool", "Removing invalid transactions: {:?}", hashes); diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index a441bf9b2a9a0..d17ba996f6078 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -209,8 +209,9 @@ where ) -> Self { let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone())); let (revalidation_queue, background_task) = match revalidation_type { - RevalidationType::Light => - (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), + RevalidationType::Light => { + (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None) + }, RevalidationType::Full => { let (queue, background) = revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); @@ -227,8 +228,9 @@ where pool, revalidation_queue: Arc::new(revalidation_queue), revalidation_strategy: Arc::new(Mutex::new(match revalidation_type { - RevalidationType::Light => - RevalidationStrategy::Light(RevalidationStatus::NotScheduled), + RevalidationType::Light => { + RevalidationStrategy::Light(RevalidationStatus::NotScheduled) + }, RevalidationType::Full => RevalidationStrategy::Always, })), ready_poll: Arc::new(Mutex::new(ReadyPoll::new(best_block_number))), @@ -343,13 +345,13 @@ where // There could be transaction being added because of some re-org happening at 
the relevant // block, but this is relative unlikely. if status.ready == 0 && status.future == 0 { - return async { Box::new(std::iter::empty()) as Box<_> }.boxed() + return async { Box::new(std::iter::empty()) as Box<_> }.boxed(); } if self.ready_poll.lock().updated_at() >= at { log::trace!(target: "txpool", "Transaction pool already processed block #{}", at); let iterator: ReadyIteratorFor = Box::new(self.pool.validated_pool().ready()); - return async move { iterator }.boxed() + return async move { iterator }.boxed(); } self.ready_poll @@ -536,8 +538,8 @@ impl RevalidationStatus { }, Self::Scheduled(revalidate_at_time, revalidate_at_block) => { let is_required = - revalidate_at_time.map(|at| Instant::now() >= at).unwrap_or(false) || - revalidate_at_block.map(|at| block >= at).unwrap_or(false); + revalidate_at_time.map(|at| Instant::now() >= at).unwrap_or(false) + || revalidate_at_block.map(|at| block >= at).unwrap_or(false); if is_required { *self = Self::InProgress; } @@ -571,11 +573,11 @@ async fn prune_known_txs_for_block h, Ok(None) => { log::debug!(target: "txpool", "Could not find header for {:?}.", block_hash); - return hashes + return hashes; }, Err(e) => { log::debug!(target: "txpool", "Error retrieving header for {:?}: {}", block_hash, e); - return hashes + return hashes; }, }; @@ -610,7 +612,7 @@ where "Skipping ChainEvent - no last block in tree route {:?}", tree_route, ); - return + return; }, }; @@ -737,10 +739,11 @@ where let compute_tree_route = |from, to| -> Result, String> { match self.api.tree_route(from, to) { Ok(tree_route) => Ok(tree_route), - Err(e) => + Err(e) => { return Err(format!( "Error occurred while computing tree_route from {from:?} to {to:?}: {e}" - )), + )) + }, } }; diff --git a/client/transaction-pool/src/revalidation.rs b/client/transaction-pool/src/revalidation.rs index b4b4299240a32..d82bd015a1f58 100644 --- a/client/transaction-pool/src/revalidation.rs +++ b/client/transaction-pool/src/revalidation.rs @@ -188,7 +188,7 @@ 
impl RevalidationWorker { ext_hash, ); - continue + continue; } self.block_ordered diff --git a/client/utils/src/mpsc.rs b/client/utils/src/mpsc.rs index ee3fba4a5ee67..620eccbfc7d76 100644 --- a/client/utils/src/mpsc.rs +++ b/client/utils/src/mpsc.rs @@ -119,7 +119,7 @@ mod inner { let mut count = 0; loop { if self.1.is_terminated() { - break + break; } match self.try_next() { diff --git a/client/utils/src/status_sinks.rs b/client/utils/src/status_sinks.rs index a1d965d08085e..03313e1f20d79 100644 --- a/client/utils/src/status_sinks.rs +++ b/client/utils/src/status_sinks.rs @@ -151,7 +151,7 @@ impl<'a, T> Drop for ReadySinkEvent<'a, T> { fn drop(&mut self) { if let Some(sender) = self.sender.take() { if sender.is_closed() { - return + return; } let _ = self.sinks.entries_tx.unbounded_send(YieldAfter { diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index fca17e69c7652..5c407192f067c 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -1016,9 +1016,9 @@ pub mod pallet { impl, I: 'static> Pallet { /// Check if the Alliance has been initialized. fn is_initialized() -> bool { - Self::has_member(MemberRole::Founder) || - Self::has_member(MemberRole::Fellow) || - Self::has_member(MemberRole::Ally) + Self::has_member(MemberRole::Founder) + || Self::has_member(MemberRole::Fellow) + || Self::has_member(MemberRole::Ally) } /// Check if a given role has any members. 
@@ -1217,7 +1217,7 @@ impl, I: 'static> Pallet { let res = judgement(who); if res.is_err() { if let Some(parent) = T::IdentityVerifier::super_account_id(who) { - return judgement(&parent) + return judgement(&parent); } } res diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs index f7f11cafecbe2..2436ed6364e25 100644 --- a/frame/assets/src/functions.rs +++ b/frame/assets/src/functions.rs @@ -121,21 +121,21 @@ impl, I: 'static> Pallet { None => return DepositConsequence::UnknownAsset, }; if increase_supply && details.supply.checked_add(&amount).is_none() { - return DepositConsequence::Overflow + return DepositConsequence::Overflow; } if let Some(balance) = Self::maybe_balance(id, who) { if balance.checked_add(&amount).is_none() { - return DepositConsequence::Overflow + return DepositConsequence::Overflow; } } else { if amount < details.min_balance { - return DepositConsequence::BelowMinimum + return DepositConsequence::BelowMinimum; } if !details.is_sufficient && !frame_system::Pallet::::can_inc_consumer(who) { - return DepositConsequence::CannotCreate + return DepositConsequence::CannotCreate; } if details.is_sufficient && details.sufficients.checked_add(1).is_none() { - return DepositConsequence::Overflow + return DepositConsequence::Overflow; } } @@ -155,20 +155,20 @@ impl, I: 'static> Pallet { None => return UnknownAsset, }; if details.supply.checked_sub(&amount).is_none() { - return Underflow + return Underflow; } if details.status == AssetStatus::Frozen { - return Frozen + return Frozen; } if amount.is_zero() { - return Success + return Success; } let account = match Account::::get(id, who) { Some(a) => a, None => return NoFunds, }; if account.is_frozen { - return Frozen + return Frozen; } if let Some(rest) = account.balance.checked_sub(&amount) { if let Some(frozen) = T::Freezer::frozen_balance(id, who) { @@ -258,7 +258,7 @@ impl, I: 'static> Pallet { Ok(dust) => actual.saturating_add(dust), //< guaranteed by reducible_balance Err(e) => 
{ debug_assert!(false, "passed from reducible_balance; qed"); - return Err(e) + return Err(e); }, }; @@ -384,7 +384,7 @@ impl, I: 'static> Pallet { ) -> DispatchResult, ) -> DispatchResult { if amount.is_zero() { - return Ok(()) + return Ok(()); } Self::can_increase(id, beneficiary, amount, true).into_result()?; @@ -470,7 +470,7 @@ impl, I: 'static> Pallet { ) -> DispatchResult, ) -> Result { if amount.is_zero() { - return Ok(amount) + return Ok(amount); } let details = Asset::::get(id).ok_or(Error::::Unknown)?; @@ -493,7 +493,7 @@ impl, I: 'static> Pallet { debug_assert!(account.balance.is_zero(), "checked in prep; qed"); target_died = Some(Self::dead_account(target, details, &account.reason, false)); if let Some(Remove) = target_died { - return Ok(()) + return Ok(()); } }; *maybe_account = Some(account); @@ -546,7 +546,7 @@ impl, I: 'static> Pallet { ) -> Result<(T::Balance, Option), DispatchError> { // Early exit if no-op. if amount.is_zero() { - return Ok((amount, None)) + return Ok((amount, None)); } let details = Asset::::get(id).ok_or(Error::::Unknown)?; ensure!(details.status == AssetStatus::Live, Error::::AssetNotLive); @@ -569,7 +569,7 @@ impl, I: 'static> Pallet { // Skip if source == dest if source == dest { - return Ok(()) + return Ok(()); } // Burn any dust if needed. 
@@ -614,7 +614,7 @@ impl, I: 'static> Pallet { Some(Self::dead_account(source, details, &source_account.reason, false)); if let Some(Remove) = source_died { Account::::remove(id, &source); - return Ok(()) + return Ok(()); } } Account::::insert(id, &source, &source_account); @@ -706,7 +706,7 @@ impl, I: 'static> Pallet { let _ = Self::dead_account(&who, &mut details, &v.reason, true); dead_accounts.push(who); if dead_accounts.len() >= (max_items as usize) { - break + break; } } remaining_accounts = details.accounts; @@ -746,7 +746,7 @@ impl, I: 'static> Pallet { removed_approvals = removed_approvals.saturating_add(1); details.approvals = details.approvals.saturating_sub(1); if removed_approvals >= max_items { - break + break; } } Self::deposit_event(Event::ApprovalsDestroyed { diff --git a/frame/assets/src/impl_stored_map.rs b/frame/assets/src/impl_stored_map.rs index a4669c776ed41..9e9af9d48c1ee 100644 --- a/frame/assets/src/impl_stored_map.rs +++ b/frame/assets/src/impl_stored_map.rs @@ -42,7 +42,7 @@ impl, I: 'static> StoredMap<(T::AssetId, T::AccountId), T::Extra> f if let Some(ref mut account) = maybe_account { account.extra = extra; } else { - return Err(DispatchError::NoProviders.into()) + return Err(DispatchError::NoProviders.into()); } } else { // They want to delete it. Let this pass if the item never existed anyway. 
diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index cdd0553218225..e51c7cc0427f8 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -1001,7 +1001,7 @@ pub mod pallet { ensure!(details.status == AssetStatus::Live, Error::::LiveAsset); ensure!(origin == details.owner, Error::::NoPermission); if details.owner == owner { - return Ok(()) + return Ok(()); } let metadata_deposit = Metadata::::get(id).deposit; diff --git a/frame/assets/src/types.rs b/frame/assets/src/types.rs index 557af6bd3f488..88c3b2cd8193a 100644 --- a/frame/assets/src/types.rs +++ b/frame/assets/src/types.rs @@ -102,7 +102,7 @@ pub enum ExistenceReason { impl ExistenceReason { pub(crate) fn take_deposit(&mut self) -> Option { if !matches!(self, ExistenceReason::DepositHeld(_)) { - return None + return None; } if let ExistenceReason::DepositHeld(deposit) = sp_std::mem::replace(self, ExistenceReason::DepositRefunded) diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index ff2c5df04a453..e2a70f53bb532 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -181,7 +181,7 @@ impl Pallet { let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); for (id, mut data) in pre_runtime_digests { if id == AURA_ENGINE_ID { - return Slot::decode(&mut data).ok() + return Slot::decode(&mut data).ok(); } } @@ -252,7 +252,7 @@ impl FindAuthor for Pallet { if id == AURA_ENGINE_ID { let slot = Slot::decode(&mut data).ok()?; let author_index = *slot % Self::authorities().len() as u64; - return Some(author_index as u32) + return Some(author_index as u32); } } diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index c08e773abe3a7..5d081aa932276 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -123,7 +123,7 @@ where if let Some(ref author) = author { if !acc.insert((*number, author.clone())) { - return Err("more than one uncle per number per author included") + return Err("more than one uncle per 
number per author included"); } } @@ -243,7 +243,7 @@ pub mod pallet { ensure!(new_uncles.len() <= MAX_UNCLES, Error::::TooManyUncles); if >::get() { - return Err(Error::::UnclesAlreadySet.into()) + return Err(Error::::UnclesAlreadySet.into()); } >::put(true); @@ -282,7 +282,7 @@ pub mod pallet { existing_hashes.push(hash); if new_uncles.len() == MAX_UNCLES { - break + break; } }, Err(_) => { @@ -304,8 +304,9 @@ pub mod pallet { _data: &InherentData, ) -> result::Result<(), Self::Error> { match call { - Call::set_uncles { ref new_uncles } if new_uncles.len() > MAX_UNCLES => - Err(InherentError::Uncles(Error::::TooManyUncles.as_str().into())), + Call::set_uncles { ref new_uncles } if new_uncles.len() > MAX_UNCLES => { + Err(InherentError::Uncles(Error::::TooManyUncles.as_str().into())) + }, _ => Ok(()), } } @@ -324,7 +325,7 @@ impl Pallet { pub fn author() -> Option { // Check the memoized storage value. if let Some(author) = >::get() { - return Some(author) + return Some(author); } let digest = >::digest(); @@ -383,30 +384,30 @@ impl Pallet { let hash = uncle.hash(); if uncle.number() < &One::one() { - return Err(Error::::GenesisUncle.into()) + return Err(Error::::GenesisUncle.into()); } if uncle.number() > &maximum_height { - return Err(Error::::TooHighUncle.into()) + return Err(Error::::TooHighUncle.into()); } { let parent_number = *uncle.number() - One::one(); let parent_hash = >::block_hash(&parent_number); if &parent_hash != uncle.parent_hash() { - return Err(Error::::InvalidUncleParent.into()) + return Err(Error::::InvalidUncleParent.into()); } } if uncle.number() < &minimum_height { - return Err(Error::::OldUncle.into()) + return Err(Error::::OldUncle.into()); } let duplicate = existing_uncles.into_iter().any(|h| *h == hash); let in_chain = >::block_hash(uncle.number()) == hash; if duplicate || in_chain { - return Err(Error::::UncleAlreadyIncluded.into()) + return Err(Error::::UncleAlreadyIncluded.into()); } // check uncle validity. 
@@ -508,7 +509,7 @@ mod tests { { for (id, mut data) in digests { if id == TEST_ID { - return u64::decode(&mut data).ok() + return u64::decode(&mut data).ok(); } } @@ -532,9 +533,9 @@ mod tests { Err(_) => return Err("wrong seal"), Ok(a) => { if a != author { - return Err("wrong author in seal") + return Err("wrong author in seal"); } - break + break; }, } } diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index f55bda751887d..2cc7a6c940704 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -196,7 +196,7 @@ impl Pallet { "rejecting unsigned report equivocation transaction because it is not local/in-block.", ); - return InvalidTransaction::Call.into() + return InvalidTransaction::Call.into(); }, } diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index eadaa036332fa..85f1799324841 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -488,7 +488,7 @@ impl FindAuthor for Pallet { for (id, mut data) in digests.into_iter() { if id == BABE_ENGINE_ID { let pre_digest: PreDigest = PreDigest::decode(&mut data).ok()?; - return Some(pre_digest.authority_index()) + return Some(pre_digest.authority_index()); } } @@ -733,7 +733,7 @@ impl Pallet { // let's ensure that we only do the initialization once per block let initialized = Self::initialized().is_some(); if initialized { - return + return; } let pre_digest = @@ -804,7 +804,7 @@ impl Pallet { // validate the equivocation proof if !sp_consensus_babe::check_equivocation_proof(equivocation_proof) { - return Err(Error::::InvalidEquivocationProof.into()) + return Err(Error::::InvalidEquivocationProof.into()); } let validator_set_count = key_owner_proof.validator_count(); @@ -816,7 +816,7 @@ impl Pallet { // check that the slot number is consistent with the session index // in the key ownership proof (i.e. 
slot is for that epoch) if epoch_index != session_index { - return Err(Error::::InvalidKeyOwnershipProof.into()) + return Err(Error::::InvalidKeyOwnershipProof.into()); } // check the membership proof and extract the offender's id diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index d4132e6378540..c93b133897865 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -242,8 +242,8 @@ fn can_estimate_current_epoch_progress() { ); } else { assert!( - Babe::estimate_current_session_progress(i).0.unwrap() < - Permill::from_percent(100) + Babe::estimate_current_session_progress(i).0.unwrap() + < Permill::from_percent(100) ); } } @@ -485,7 +485,7 @@ fn report_equivocation_current_session_works() { // check that the balances of all other validators are left intact. for validator in &validators { if *validator == offending_validator_id { - continue + continue; } assert_eq!(Balances::total_balance(validator), 10_000_000); diff --git a/frame/bags-list/remote-tests/src/lib.rs b/frame/bags-list/remote-tests/src/lib.rs index fc25e3b65ddb1..cb4db5257f1a9 100644 --- a/frame/bags-list/remote-tests/src/lib.rs +++ b/frame/bags-list/remote-tests/src/lib.rs @@ -80,7 +80,7 @@ pub fn display_and_check_bags>( Some(bag) => bag, None => { log::info!(target: LOG_TARGET, "{} NO VOTERS.", pretty_thresh); - continue + continue; }, }; diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs index 272526ad1a636..744cfe79bb21a 100644 --- a/frame/bags-list/src/list/mod.rs +++ b/frame/bags-list/src/list/mod.rs @@ -143,7 +143,7 @@ impl, I: 'static> List { pub fn migrate(old_thresholds: &[T::Score]) -> u32 { let new_thresholds = T::BagThresholds::get(); if new_thresholds == old_thresholds { - return 0 + return 0; } // we can't check all preconditions, but we can check one @@ -178,7 +178,7 @@ impl, I: 'static> List { if !affected_old_bags.insert(affected_bag) { // If the previous threshold list was [10, 20], and we insert [3, 5], then there's // no 
point iterating through bag 10 twice. - continue + continue; } if let Some(bag) = Bag::::get(affected_bag) { @@ -190,7 +190,7 @@ impl, I: 'static> List { // a removed bag means that all members of that bag must be rebagged for removed_bag in removed_bags.clone() { if !affected_old_bags.insert(removed_bag) { - continue + continue; } if let Some(bag) = Bag::::get(removed_bag) { @@ -249,15 +249,14 @@ impl, I: 'static> List { // easier; they can just configure `type BagThresholds = ()`. let thresholds = T::BagThresholds::get(); let iter = thresholds.iter().copied(); - let iter: Box> = if thresholds.last() == - Some(&T::Score::max_value()) - { - // in the event that they included it, we can just pass the iterator through unchanged. - Box::new(iter.rev()) - } else { - // otherwise, insert it here. - Box::new(iter.chain(iter::once(T::Score::max_value())).rev()) - }; + let iter: Box> = + if thresholds.last() == Some(&T::Score::max_value()) { + // in the event that they included it, we can just pass the iterator through unchanged. + Box::new(iter.rev()) + } else { + // otherwise, insert it here. + Box::new(iter.chain(iter::once(T::Score::max_value())).rev()) + }; iter.filter_map(Bag::get).flat_map(|bag| bag.iter()) } @@ -313,7 +312,7 @@ impl, I: 'static> List { /// Returns an error if the list already contains `id`. pub(crate) fn insert(id: T::AccountId, score: T::Score) -> Result<(), ListError> { if Self::contains(&id) { - return Err(ListError::Duplicate) + return Err(ListError::Duplicate); } let bag_score = notional_bag_for::(score); @@ -339,7 +338,7 @@ impl, I: 'static> List { /// Remove an id from the list, returning an error if `id` does not exists. 
pub(crate) fn remove(id: &T::AccountId) -> Result<(), ListError> { if !Self::contains(id) { - return Err(ListError::NodeNotFound) + return Err(ListError::NodeNotFound); } let _ = Self::remove_many(sp_std::iter::once(id)); Ok(()) @@ -567,15 +566,14 @@ impl, I: 'static> List { let thresholds = T::BagThresholds::get(); let iter = thresholds.iter().copied(); - let iter: Box> = if thresholds.last() == - Some(&T::Score::max_value()) - { - // in the event that they included it, we can just pass the iterator through unchanged. - Box::new(iter) - } else { - // otherwise, insert it here. - Box::new(iter.chain(sp_std::iter::once(T::Score::max_value()))) - }; + let iter: Box> = + if thresholds.last() == Some(&T::Score::max_value()) { + // in the event that they included it, we can just pass the iterator through unchanged. + Box::new(iter) + } else { + // otherwise, insert it here. + Box::new(iter.chain(sp_std::iter::once(T::Score::max_value()))) + }; iter.filter_map(|t| { Bag::::get(t) @@ -693,7 +691,7 @@ impl, I: 'static> Bag { // this should never happen, but this check prevents one path to a worst case // infinite loop. 
defensive!("system logic error: inserting a node who has the id of tail"); - return + return; }; } @@ -904,9 +902,9 @@ impl, I: 'static> Node { "node does not exist in the expected bag" ); - let non_terminal_check = !self.is_terminal() && - expected_bag.head.as_ref() != Some(id) && - expected_bag.tail.as_ref() != Some(id); + let non_terminal_check = !self.is_terminal() + && expected_bag.head.as_ref() != Some(id) + && expected_bag.tail.as_ref() != Some(id); let terminal_check = expected_bag.head.as_ref() == Some(id) || expected_bag.tail.as_ref() == Some(id); frame_support::ensure!( diff --git a/frame/bags-list/src/migrations.rs b/frame/bags-list/src/migrations.rs index e1dc9f777e537..c12fa7e723a7f 100644 --- a/frame/bags-list/src/migrations.rs +++ b/frame/bags-list/src/migrations.rs @@ -115,7 +115,7 @@ impl, I: 'static> OnRuntimeUpgrade for AddScore { crate::ListNodes::::insert(node.id, new_node); } - return frame_support::weights::Weight::MAX + return frame_support::weights::Weight::MAX; } #[cfg(feature = "try-runtime")] diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index d3085152eba6c..530286552f1ad 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -647,7 +647,7 @@ impl BitOr for Reasons { type Output = Reasons; fn bitor(self, other: Reasons) -> Reasons { if self == other { - return self + return self; } Reasons::All } @@ -806,11 +806,11 @@ impl, I: 'static> Pallet { mint: bool, ) -> DepositConsequence { if amount.is_zero() { - return DepositConsequence::Success + return DepositConsequence::Success; } if mint && TotalIssuance::::get().checked_add(&amount).is_none() { - return DepositConsequence::Overflow + return DepositConsequence::Overflow; } let new_total_balance = match account.total().checked_add(&amount) { @@ -819,7 +819,7 @@ impl, I: 'static> Pallet { }; if new_total_balance < T::ExistentialDeposit::get() { - return DepositConsequence::BelowMinimum + return DepositConsequence::BelowMinimum; } // NOTE: We assume 
that we are a provider, so don't need to do any checks in the @@ -834,11 +834,11 @@ impl, I: 'static> Pallet { account: &AccountData, ) -> WithdrawConsequence { if amount.is_zero() { - return WithdrawConsequence::Success + return WithdrawConsequence::Success; } if TotalIssuance::::get().checked_sub(&amount).is_none() { - return WithdrawConsequence::Underflow + return WithdrawConsequence::Underflow; } let new_total_balance = match account.total().checked_sub(&amount) { @@ -855,7 +855,7 @@ impl, I: 'static> Pallet { if frame_system::Pallet::::can_dec_provider(who) { WithdrawConsequence::ReducedToZero(new_total_balance) } else { - return WithdrawConsequence::WouldDie + return WithdrawConsequence::WouldDie; } } else { WithdrawConsequence::Success @@ -870,7 +870,7 @@ impl, I: 'static> Pallet { // Eventual free funds must be no less than the frozen balance. let min_balance = account.frozen(Reasons::All); if new_free_balance < min_balance { - return WithdrawConsequence::Frozen + return WithdrawConsequence::Frozen; } success @@ -1013,14 +1013,14 @@ impl, I: 'static> Pallet { status: Status, ) -> Result { if value.is_zero() { - return Ok(Zero::zero()) + return Ok(Zero::zero()); } if slashed == beneficiary { return match status { Status::Free => Ok(value.saturating_sub(Self::unreserve(slashed, value))), Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), - } + }; } let ((actual, _maybe_one_dust), _maybe_other_dust) = Self::try_mutate_account_with_dust( @@ -1033,16 +1033,18 @@ impl, I: 'static> Pallet { let actual = cmp::min(from_account.reserved, value); ensure!(best_effort || actual == value, Error::::InsufficientBalance); match status { - Status::Free => + Status::Free => { to_account.free = to_account .free .checked_add(&actual) - .ok_or(ArithmeticError::Overflow)?, - Status::Reserved => + .ok_or(ArithmeticError::Overflow)? 
+ }, + Status::Reserved => { to_account.reserved = to_account .reserved .checked_add(&actual) - .ok_or(ArithmeticError::Overflow)?, + .ok_or(ArithmeticError::Overflow)? + }, } from_account.reserved -= actual; Ok(actual) @@ -1101,7 +1103,7 @@ impl, I: 'static> fungible::Inspect for Pallet impl, I: 'static> fungible::Mutate for Pallet { fn mint_into(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { if amount.is_zero() { - return Ok(()) + return Ok(()); } Self::try_mutate_account(who, |account, _is_new| -> DispatchResult { Self::deposit_consequence(who, amount, account, true).into_result()?; @@ -1118,7 +1120,7 @@ impl, I: 'static> fungible::Mutate for Pallet { amount: Self::Balance, ) -> Result { if amount.is_zero() { - return Ok(Self::Balance::zero()) + return Ok(Self::Balance::zero()); } let actual = Self::try_mutate_account( who, @@ -1177,7 +1179,7 @@ impl, I: 'static> fungible::InspectHold for Pallet, I: 'static> fungible::InspectHold for Pallet, I: 'static> fungible::MutateHold for Pallet { fn hold(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { if amount.is_zero() { - return Ok(()) + return Ok(()); } ensure!(Self::can_reserve(who, amount), Error::::InsufficientBalance); Self::mutate_account(who, |a| { @@ -1206,7 +1208,7 @@ impl, I: 'static> fungible::MutateHold for Pallet Result { if amount.is_zero() { - return Ok(amount) + return Ok(amount); } // Done on a best-effort basis. Self::try_mutate_account(who, |a, _| { @@ -1412,7 +1414,7 @@ where // Check if `value` amount of free balance can be slashed from `who`. fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { if value.is_zero() { - return true + return true; } Self::free_balance(who) >= value } @@ -1429,7 +1431,7 @@ where // Is a no-op if amount to be burned is zero. 
fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance { if amount.is_zero() { - return PositiveImbalance::zero() + return PositiveImbalance::zero(); } >::mutate(|issued| { *issued = issued.checked_sub(&amount).unwrap_or_else(|| { @@ -1445,7 +1447,7 @@ where // Is a no-op if amount to be issued it zero. fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance { if amount.is_zero() { - return NegativeImbalance::zero() + return NegativeImbalance::zero(); } >::mutate(|issued| { *issued = issued.checked_add(&amount).unwrap_or_else(|| { @@ -1475,7 +1477,7 @@ where new_balance: T::Balance, ) -> DispatchResult { if amount.is_zero() { - return Ok(()) + return Ok(()); } let min_balance = Self::account(who).frozen(reasons.into()); ensure!(new_balance >= min_balance, Error::::LiquidityRestrictions); @@ -1491,7 +1493,7 @@ where existence_requirement: ExistenceRequirement, ) -> DispatchResult { if value.is_zero() || transactor == dest { - return Ok(()) + return Ok(()); } Self::try_mutate_account_with_dust( @@ -1559,10 +1561,10 @@ where /// inconsistent or `can_slash` wasn't used appropriately. fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) { if value.is_zero() { - return (NegativeImbalance::zero(), Zero::zero()) + return (NegativeImbalance::zero(), Zero::zero()); } if Self::total_balance(who).is_zero() { - return (NegativeImbalance::zero(), value) + return (NegativeImbalance::zero(), value); } for attempt in 0..2 { @@ -1611,7 +1613,7 @@ where who: who.clone(), amount: value.saturating_sub(not_slashed), }); - return (imbalance, not_slashed) + return (imbalance, not_slashed); }, Err(_) => (), } @@ -1629,7 +1631,7 @@ where value: Self::Balance, ) -> Result { if value.is_zero() { - return Ok(PositiveImbalance::zero()) + return Ok(PositiveImbalance::zero()); } Self::try_mutate_account( @@ -1654,7 +1656,7 @@ where /// - `value` is so large it would cause the balance of `who` to overflow. 
fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance { if value.is_zero() { - return Self::PositiveImbalance::zero() + return Self::PositiveImbalance::zero(); } Self::try_mutate_account( @@ -1687,7 +1689,7 @@ where liveness: ExistenceRequirement, ) -> result::Result { if value.is_zero() { - return Ok(NegativeImbalance::zero()) + return Ok(NegativeImbalance::zero()); } Self::try_mutate_account( @@ -1760,7 +1762,7 @@ where /// Always `true` if value to be reserved is zero. fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { if value.is_zero() { - return true + return true; } Self::account(who).free.checked_sub(&value).map_or(false, |new_balance| { Self::ensure_can_withdraw(who, value, WithdrawReasons::RESERVE, new_balance).is_ok() @@ -1776,7 +1778,7 @@ where /// Is a no-op if value to be reserved is zero. fn reserve(who: &T::AccountId, value: Self::Balance) -> DispatchResult { if value.is_zero() { - return Ok(()) + return Ok(()); } Self::try_mutate_account(who, |account, _| -> DispatchResult { @@ -1798,10 +1800,10 @@ where /// NOTE: returns amount value which wasn't successfully unreserved. fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { if value.is_zero() { - return Zero::zero() + return Zero::zero(); } if Self::total_balance(who).is_zero() { - return value + return value; } let actual = match Self::mutate_account(who, |account| { @@ -1817,7 +1819,7 @@ where // This should never happen since we don't alter the total amount in the account. // If it ever does, then we should fail gracefully though, indicating that nothing // could be done. 
- return value + return value; }, }; @@ -1834,10 +1836,10 @@ where value: Self::Balance, ) -> (Self::NegativeImbalance, Self::Balance) { if value.is_zero() { - return (NegativeImbalance::zero(), Zero::zero()) + return (NegativeImbalance::zero(), Zero::zero()); } if Self::total_balance(who).is_zero() { - return (NegativeImbalance::zero(), value) + return (NegativeImbalance::zero(), value); } // NOTE: `mutate_account` may fail if it attempts to reduce the balance to the point that an @@ -1866,7 +1868,7 @@ where who: who.clone(), amount: value.saturating_sub(not_slashed), }); - return (imbalance, not_slashed) + return (imbalance, not_slashed); }, Err(_) => (), } @@ -1915,7 +1917,7 @@ where value: Self::Balance, ) -> DispatchResult { if value.is_zero() { - return Ok(()) + return Ok(()); } Reserves::::try_mutate(who, |reserves| -> DispatchResult { @@ -1944,7 +1946,7 @@ where value: Self::Balance, ) -> Self::Balance { if value.is_zero() { - return Zero::zero() + return Zero::zero(); } Reserves::::mutate_exists(who, |maybe_reserves| -> Self::Balance { @@ -1991,7 +1993,7 @@ where value: Self::Balance, ) -> (Self::NegativeImbalance, Self::Balance) { if value.is_zero() { - return (NegativeImbalance::zero(), Zero::zero()) + return (NegativeImbalance::zero(), Zero::zero()); } Reserves::::mutate(who, |reserves| -> (Self::NegativeImbalance, Self::Balance) { @@ -2030,15 +2032,16 @@ where status: Status, ) -> Result { if value.is_zero() { - return Ok(Zero::zero()) + return Ok(Zero::zero()); } if slashed == beneficiary { return match status { Status::Free => Ok(Self::unreserve_named(id, slashed, value)), - Status::Reserved => - Ok(value.saturating_sub(Self::reserved_balance_named(id, slashed))), - } + Status::Reserved => { + Ok(value.saturating_sub(Self::reserved_balance_named(id, slashed))) + }, + }; } Reserves::::try_mutate(slashed, |reserves| -> Result { @@ -2136,7 +2139,7 @@ where reasons: WithdrawReasons, ) { if amount.is_zero() || reasons.is_empty() { - return + return; } let 
mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); let mut locks = Self::locks(who) @@ -2158,7 +2161,7 @@ where reasons: WithdrawReasons, ) { if amount.is_zero() || reasons.is_empty() { - return + return; } let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); let mut locks = Self::locks(who) diff --git a/frame/beefy-mmr/primitives/src/lib.rs b/frame/beefy-mmr/primitives/src/lib.rs index f88fb89acaaab..6ecb5c322e7e4 100644 --- a/frame/beefy-mmr/primitives/src/lib.rs +++ b/frame/beefy-mmr/primitives/src/lib.rs @@ -246,7 +246,7 @@ where L: Into>, { if leaf_index >= number_of_leaves { - return false + return false; } let leaf_hash = match leaf.into() { @@ -339,7 +339,7 @@ where "[merkelize_row] Next: {:?}", next.iter().map(|s| array_bytes::bytes2hex("", s.as_ref())).collect::>() ); - return Err(next) + return Err(next); }, } } diff --git a/frame/beefy/src/lib.rs b/frame/beefy/src/lib.rs index 305b158124b67..f06cd3edd0c6f 100644 --- a/frame/beefy/src/lib.rs +++ b/frame/beefy/src/lib.rs @@ -151,11 +151,11 @@ impl Pallet { fn initialize_authorities(authorities: &Vec) -> Result<(), ()> { if authorities.is_empty() { - return Ok(()) + return Ok(()); } if !>::get().is_empty() { - return Err(()) + return Err(()); } let bounded_authorities = diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index 0b77a92347d03..74fb0b942b482 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -172,7 +172,7 @@ fn linear_regression( let (intercept, params, errors) = raw_linear_regression(&xs, &ys, x_vars, true)?; if intercept >= -0.0001 { // The intercept is positive, or is effectively zero. - return Some((intercept, params, errors[1..].to_vec())) + return Some((intercept, params, errors[1..].to_vec())); } // The intercept is negative. @@ -199,7 +199,7 @@ impl Analysis { // results. Note: We choose the median value because it is more robust to outliers. 
fn median_value(r: &Vec, selector: BenchmarkSelector) -> Option { if r.is_empty() { - return None + return None; } let mut values: Vec = r @@ -229,7 +229,7 @@ impl Analysis { pub fn median_slopes(r: &Vec, selector: BenchmarkSelector) -> Option { if r[0].components.is_empty() { - return Self::median_value(r, selector) + return Self::median_value(r, selector); } let results = r[0] @@ -329,7 +329,7 @@ impl Analysis { pub fn min_squares_iqr(r: &Vec, selector: BenchmarkSelector) -> Option { if r[0].components.is_empty() || r.len() <= 2 { - return Self::median_value(r, selector) + return Self::median_value(r, selector); } let mut results = BTreeMap::, Vec>::new(); @@ -356,7 +356,7 @@ impl Analysis { .map(|(p, vs)| { // Avoid divide by zero if vs.is_empty() { - return (p.clone(), 0, 0) + return (p.clone(), 0, 0); } let total = vs.iter().fold(0u128, |acc, v| acc + *v); let mean = total / vs.len() as u128; @@ -405,7 +405,7 @@ impl Analysis { let min_squares = Self::min_squares_iqr(r, selector); if median_slopes.is_none() || min_squares.is_none() { - return None + return None; } let median_slopes = median_slopes.unwrap(); @@ -438,7 +438,7 @@ fn ms(mut nanos: u128) -> String { while x > 1 { if nanos > x * 1_000 { nanos = nanos / x * x; - break + break; } x /= 10; } diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index a221eccb82c85..04b3204b892ed 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -910,7 +910,7 @@ macro_rules! impl_bench_name_tests { // Every variant must implement [`BenchmarkingSetup`]. // // ```nocompile -// +// // struct Transfer; // impl BenchmarkingSetup for Transfer { ... 
} // diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 88a7d6d0286b2..7017e80d6075a 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -66,7 +66,7 @@ mod pallet_test { #[pallet::weight(0)] pub fn always_error(_origin: OriginFor) -> DispatchResult { - return Err("I always fail".into()) + return Err("I always fail".into()); } } } diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 07dd781c29af3..5b5b3aa71bffa 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -52,8 +52,8 @@ fn setup_bounty, I: 'static>( let caller = account("caller", u, SEED); let value: BalanceOf = T::BountyValueMinimum::get().saturating_mul(100u32.into()); let fee = value / 2u32.into(); - let deposit = T::BountyDepositBase::get() + - T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into(); + let deposit = T::BountyDepositBase::get() + + T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into(); let _ = T::Currency::make_free_balance_be(&caller, deposit); let curator = account("curator", u, SEED); let _ = T::Currency::make_free_balance_be(&curator, fee / 2u32.into()); diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index d947226f87fa0..ae81befd7246d 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -447,7 +447,7 @@ pub mod pallet { match bounty.status { BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => { // No curator to unassign at this point. - return Err(Error::::UnexpectedStatus.into()) + return Err(Error::::UnexpectedStatus.into()); }, BountyStatus::CuratorProposed { ref curator } => { // A curator has been proposed, but not accepted yet. @@ -472,7 +472,7 @@ pub mod pallet { // Continue to change bounty status below... } else { // Curator has more time to give an update. 
- return Err(Error::::Premature.into()) + return Err(Error::::Premature.into()); } } else { // Else this is the curator, willingly giving up their role. @@ -528,8 +528,8 @@ pub mod pallet { T::Currency::reserve(curator, deposit)?; bounty.curator_deposit = deposit; - let update_due = frame_system::Pallet::::block_number() + - T::BountyUpdatePeriod::get(); + let update_due = frame_system::Pallet::::block_number() + + T::BountyUpdatePeriod::get(); bounty.status = BountyStatus::Active { curator: curator.clone(), update_due }; @@ -579,8 +579,8 @@ pub mod pallet { bounty.status = BountyStatus::PendingPayout { curator: signer, beneficiary: beneficiary.clone(), - unlock_at: frame_system::Pallet::::block_number() + - T::BountyDepositPayoutDelay::get(), + unlock_at: frame_system::Pallet::::block_number() + + T::BountyDepositPayoutDelay::get(), }; Ok(()) @@ -697,12 +697,12 @@ pub mod pallet { // Return early, nothing else to do. return Ok( Some(>::WeightInfo::close_bounty_proposed()).into() - ) + ); }, BountyStatus::Approved => { // For weight reasons, we don't allow a council to cancel in this phase. // We ask for them to wait until it is funded before they can cancel. - return Err(Error::::UnexpectedStatus.into()) + return Err(Error::::UnexpectedStatus.into()); }, BountyStatus::Funded | BountyStatus::CuratorProposed { .. } => { // Nothing extra to do besides the removal of the bounty below. @@ -719,7 +719,7 @@ pub mod pallet { // this bounty, it should mean the curator was acting maliciously. // So the council should first unassign the curator, slashing their // deposit. 
- return Err(Error::::PendingPayout.into()) + return Err(Error::::PendingPayout.into()); }, } @@ -767,8 +767,8 @@ pub mod pallet { match bounty.status { BountyStatus::Active { ref curator, ref mut update_due } => { ensure!(*curator == signer, Error::::RequireCurator); - *update_due = (frame_system::Pallet::::block_number() + - T::BountyUpdatePeriod::get()) + *update_due = (frame_system::Pallet::::block_number() + + T::BountyUpdatePeriod::get()) .max(*update_due); }, _ => return Err(Error::::UnexpectedStatus.into()), @@ -825,8 +825,8 @@ impl, I: 'static> Pallet { let index = Self::bounty_count(); // reserve deposit for new bounty - let bond = T::BountyDepositBase::get() + - T::DataDepositPerByte::get() * (bounded_description.len() as u32).into(); + let bond = T::BountyDepositBase::get() + + T::DataDepositPerByte::get() * (bounded_description.len() as u32).into(); T::Currency::reserve(&proposer, bond) .map_err(|_| Error::::InsufficientProposersBalance)?; diff --git a/frame/bounties/src/migrations/v4.rs b/frame/bounties/src/migrations/v4.rs index 2f81c97127bcd..af7b24f30d476 100644 --- a/frame/bounties/src/migrations/v4.rs +++ b/frame/bounties/src/migrations/v4.rs @@ -54,7 +54,7 @@ pub fn migrate< target: "runtime::bounties", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return Weight::zero() + return Weight::zero(); } let on_chain_storage_version =

::on_chain_storage_version(); diff --git a/frame/child-bounties/src/benchmarking.rs b/frame/child-bounties/src/benchmarking.rs index 697ed40e0071f..ebd7e94a4a1b7 100644 --- a/frame/child-bounties/src/benchmarking.rs +++ b/frame/child-bounties/src/benchmarking.rs @@ -61,8 +61,8 @@ fn setup_bounty( let caller = account("caller", user, SEED); let value: BalanceOf = T::BountyValueMinimum::get().saturating_mul(100u32.into()); let fee = value / 2u32.into(); - let deposit = T::BountyDepositBase::get() + - T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into(); + let deposit = T::BountyDepositBase::get() + + T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into(); let _ = T::Currency::make_free_balance_be(&caller, deposit); let curator = account("curator", user, SEED); let _ = T::Currency::make_free_balance_be(&curator, fee / 2u32.into()); diff --git a/frame/child-bounties/src/lib.rs b/frame/child-bounties/src/lib.rs index 2dfe0660ad68e..8b87a8509ffc7 100644 --- a/frame/child-bounties/src/lib.rs +++ b/frame/child-bounties/src/lib.rs @@ -251,8 +251,8 @@ pub mod pallet { description.try_into().map_err(|_| BountiesError::::ReasonTooBig)?; ensure!(value >= T::ChildBountyValueMinimum::get(), BountiesError::::InvalidValue); ensure!( - Self::parent_child_bounties(parent_bounty_id) <= - T::MaxActiveChildBountyCount::get() as u32, + Self::parent_child_bounties(parent_bounty_id) + <= T::MaxActiveChildBountyCount::get() as u32, Error::::TooManyChildBounties, ); @@ -483,7 +483,7 @@ pub mod pallet { match child_bounty.status { ChildBountyStatus::Added => { // No curator to unassign at this point. - return Err(BountiesError::::UnexpectedStatus.into()) + return Err(BountiesError::::UnexpectedStatus.into()); }, ChildBountyStatus::CuratorProposed { ref curator } => { // A child-bounty curator has been proposed, but not accepted yet. @@ -491,8 +491,8 @@ pub mod pallet { // child-bounty curator can unassign the child-bounty curator. 
ensure!( maybe_sender.map_or(true, |sender| { - sender == *curator || - Self::ensure_bounty_active(parent_bounty_id) + sender == *curator + || Self::ensure_bounty_active(parent_bounty_id) .map_or(false, |(parent_curator, _)| { sender == parent_curator }) @@ -521,8 +521,8 @@ pub mod pallet { Some(sender) => { let (parent_curator, update_due) = Self::ensure_bounty_active(parent_bounty_id)?; - if sender == parent_curator || - update_due < frame_system::Pallet::::block_number() + if sender == parent_curator + || update_due < frame_system::Pallet::::block_number() { // Slash the child-bounty curator if // + the call is made by the parent bounty curator. @@ -531,7 +531,7 @@ pub mod pallet { // Continue to change bounty status below. } else { // Curator has more time to give an update. - return Err(BountiesError::::Premature.into()) + return Err(BountiesError::::Premature.into()); } }, } @@ -600,8 +600,8 @@ pub mod pallet { child_bounty.status = ChildBountyStatus::PendingPayout { curator: signer, beneficiary: beneficiary.clone(), - unlock_at: frame_system::Pallet::::block_number() + - T::BountyDepositPayoutDelay::get(), + unlock_at: frame_system::Pallet::::block_number() + + T::BountyDepositPayoutDelay::get(), }; Ok(()) } else { @@ -775,7 +775,7 @@ impl Pallet { bounty_fee: &BalanceOf, ) -> BalanceOf { if parent_curator == child_curator { - return Zero::zero() + return Zero::zero(); } // We just use the same logic from the parent bounties pallet. @@ -847,7 +847,7 @@ impl Pallet { // child-bounty, it should mean the child-bounty curator // was acting maliciously. So first unassign the // child-bounty curator, slashing their deposit. 
- return Err(BountiesError::::PendingPayout.into()) + return Err(BountiesError::::PendingPayout.into()); }, } diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 06d5b1fab78e7..01c8e1ae0b381 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -819,7 +819,7 @@ impl, I: 'static> Pallet { if position_yes.is_none() { voting.ayes.push(who.clone()); } else { - return Err(Error::::DuplicateVote.into()) + return Err(Error::::DuplicateVote.into()); } if let Some(pos) = position_no { voting.nays.swap_remove(pos); @@ -828,7 +828,7 @@ impl, I: 'static> Pallet { if position_no.is_none() { voting.nays.push(who.clone()); } else { - return Err(Error::::DuplicateVote.into()) + return Err(Error::::DuplicateVote.into()); } if let Some(pos) = position_yes { voting.ayes.swap_remove(pos); @@ -882,7 +882,7 @@ impl, I: 'static> Pallet { ), Pays::Yes, ) - .into()) + .into()); } else if disapproved { Self::deposit_event(Event::Closed { proposal_hash, yes: yes_votes, no: no_votes }); let proposal_count = Self::do_disapprove_proposal(proposal_hash); @@ -890,7 +890,7 @@ impl, I: 'static> Pallet { Some(T::WeightInfo::close_early_disapproved(seats, proposal_count)), Pays::No, ) - .into()) + .into()); } // Only allow actual closing of the proposal after the voting period has ended. 
diff --git a/frame/collective/src/migrations/v4.rs b/frame/collective/src/migrations/v4.rs index 483c3f9fa9e69..8729066502c9d 100644 --- a/frame/collective/src/migrations/v4.rs +++ b/frame/collective/src/migrations/v4.rs @@ -45,7 +45,7 @@ pub fn migrate::on_chain_storage_version(); @@ -84,7 +84,7 @@ pub fn pre_migrate>(old_p log_migration("pre-migration", old_pallet_name, new_pallet_name); if new_pallet_name == old_pallet_name { - return + return; } let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); @@ -112,7 +112,7 @@ pub fn post_migrate>(old_ log_migration("post-migration", old_pallet_name, new_pallet_name); if new_pallet_name == old_pallet_name { - return + return; } // Assert that nothing remains at the old prefix. diff --git a/frame/contracts/primitives/src/lib.rs b/frame/contracts/primitives/src/lib.rs index 4faea9eb3ee75..7ca42ac2c8155 100644 --- a/frame/contracts/primitives/src/lib.rs +++ b/frame/contracts/primitives/src/lib.rs @@ -200,18 +200,20 @@ where match (self, rhs) { (Charge(lhs), Charge(rhs)) => Charge(lhs.saturating_add(*rhs)), (Refund(lhs), Refund(rhs)) => Refund(lhs.saturating_add(*rhs)), - (Charge(lhs), Refund(rhs)) => + (Charge(lhs), Refund(rhs)) => { if lhs >= rhs { Charge(lhs.saturating_sub(*rhs)) } else { Refund(rhs.saturating_sub(*lhs)) - }, - (Refund(lhs), Charge(rhs)) => + } + }, + (Refund(lhs), Charge(rhs)) => { if lhs > rhs { Refund(lhs.saturating_sub(*rhs)) } else { Charge(rhs.saturating_sub(*lhs)) - }, + } + }, } } @@ -221,18 +223,20 @@ where match (self, rhs) { (Charge(lhs), Refund(rhs)) => Charge(lhs.saturating_add(*rhs)), (Refund(lhs), Charge(rhs)) => Refund(lhs.saturating_add(*rhs)), - (Charge(lhs), Charge(rhs)) => + (Charge(lhs), Charge(rhs)) => { if lhs >= rhs { Charge(lhs.saturating_sub(*rhs)) } else { Refund(rhs.saturating_sub(*lhs)) - }, - (Refund(lhs), Refund(rhs)) => + } + }, + (Refund(lhs), Refund(rhs)) => { if lhs > rhs { Refund(lhs.saturating_sub(*rhs)) } else { Charge(rhs.saturating_sub(*lhs)) - }, + } + }, 
} } diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs index 648bf0fd1f812..40a7374b9cf49 100644 --- a/frame/contracts/proc-macro/src/lib.rs +++ b/frame/contracts/proc-macro/src/lib.rs @@ -62,7 +62,7 @@ fn derive_debug( name.span() => compile_error!("WeightDebug is only supported for structs."); } - .into() + .into(); }; #[cfg(feature = "full")] @@ -99,7 +99,7 @@ fn iterate_fields(data: &syn::DataStruct, fmt: impl Fn(&Ident) -> TokenStream) - let recurse = fields.named.iter().filter_map(|f| { let name = f.ident.as_ref()?; if name.to_string().starts_with('_') { - return None + return None; } let value = fmt(name); let ret = quote_spanned! { f.span() => @@ -220,7 +220,7 @@ impl HostFn { match &result.arguments { syn::PathArguments::AngleBracketed(group) => { if group.args.len() != 2 { - return Err(err(span, &msg)) + return Err(err(span, &msg)); }; let arg2 = group.args.last().ok_or(err(span, &msg))?; @@ -259,7 +259,7 @@ impl HostFn { .to_string()), syn::Type::Tuple(tt) => { if !tt.elems.is_empty() { - return Err(err(arg1.span(), &msg)) + return Err(err(arg1.span(), &msg)); }; Ok("()".to_string()) }, @@ -572,7 +572,7 @@ pub fn define_env( if !attr.is_empty() { let msg = "Invalid `define_env` attribute macro: expected no attributes: `#[define_env]`."; let span = proc_macro2::TokenStream::from(attr).span(); - return syn::Error::new(span, msg).to_compile_error().into() + return syn::Error::new(span, msg).to_compile_error().into(); } let item = syn::parse_macro_input!(item as syn::ItemMod); diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index b14b107f34c90..842eeb95e8e7d 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -511,10 +511,12 @@ pub mod body { DynInstr::RandomI32(low, high) => { vec![Instruction::I32Const(rng.gen_range(*low..*high))] }, - DynInstr::RandomI32Repeated(num) => - (&mut 
rng).sample_iter(Standard).take(*num).map(Instruction::I32Const).collect(), - DynInstr::RandomI64Repeated(num) => - (&mut rng).sample_iter(Standard).take(*num).map(Instruction::I64Const).collect(), + DynInstr::RandomI32Repeated(num) => { + (&mut rng).sample_iter(Standard).take(*num).map(Instruction::I32Const).collect() + }, + DynInstr::RandomI64Repeated(num) => { + (&mut rng).sample_iter(Standard).take(*num).map(Instruction::I64Const).collect() + }, DynInstr::RandomGetLocal(low, high) => { vec![Instruction::GetLocal(rng.gen_range(*low..*high))] }, diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index 539f4b2cf737b..96750fbac0fc7 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -165,16 +165,16 @@ where /// Returns `true` iff all storage entries related to code storage exist. fn code_exists(hash: &CodeHash) -> bool { - >::contains_key(hash) && - >::contains_key(&hash) && - >::contains_key(&hash) + >::contains_key(hash) + && >::contains_key(&hash) + && >::contains_key(&hash) } /// Returns `true` iff no storage entry related to code storage exist. fn code_removed(hash: &CodeHash) -> bool { - !>::contains_key(hash) && - !>::contains_key(&hash) && - !>::contains_key(&hash) + !>::contains_key(hash) + && !>::contains_key(&hash) + && !>::contains_key(&hash) } } diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 76b200001af78..9922c82ab9ac7 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -758,11 +758,11 @@ where // `AllowIndeterminism` will only be ever set in case of off-chain execution. // Instantiations are never allowed even when executing off-chain. 
- if !(executable.is_deterministic() || - (matches!(determinism, Determinism::AllowIndeterminism) && - matches!(entry_point, ExportedFunction::Call))) + if !(executable.is_deterministic() + || (matches!(determinism, Determinism::AllowIndeterminism) + && matches!(entry_point, ExportedFunction::Call))) { - return Err(Error::::Indeterministic.into()) + return Err(Error::::Indeterministic.into()); } let frame = Frame { @@ -787,7 +787,7 @@ where gas_limit: Weight, ) -> Result { if self.frames.len() == T::CallStack::size() { - return Err(Error::::MaxCallDepthReached.into()) + return Err(Error::::MaxCallDepthReached.into()); } // We need to make sure that changes made to the contract info are not discarded. @@ -847,7 +847,7 @@ where // Avoid useless work that would be reverted anyways. if output.did_revert() { - return Ok(output) + return Ok(output); } // Storage limit is enforced as late as possible (when the last frame returns) so that @@ -865,7 +865,7 @@ where (ExportedFunction::Constructor, _) => { // It is not allowed to terminate a contract inside its constructor. if matches!(frame.contract_info, CachedContract::Terminated) { - return Err(Error::::TerminatedInConstructor.into()) + return Err(Error::::TerminatedInConstructor.into()); } // Deposit an instantiation event. @@ -905,8 +905,9 @@ where with_transaction(|| -> TransactionOutcome> { let output = do_transaction(); match &output { - Ok(result) if !result.did_revert() => - TransactionOutcome::Commit(Ok((true, output))), + Ok(result) if !result.did_revert() => { + TransactionOutcome::Commit(Ok((true, output))) + }, _ => TransactionOutcome::Rollback(Ok((false, output))), } }); @@ -948,7 +949,7 @@ where // Only gas counter changes are persisted in case of a failure. if !persist { - return + return; } // Record the storage meter changes of the nested call into the parent meter. @@ -967,7 +968,7 @@ where // trigger a rollback. 
if prev.account_id == *account_id { prev.contract_info = CachedContract::Cached(contract); - return + return; } // Predecessor is a different contract: We persist the info and invalidate the first @@ -990,7 +991,7 @@ where } self.gas_meter.absorb_nested(mem::take(&mut self.first_frame.nested_gas)); if !persist { - return + return; } let mut contract = self.first_frame.contract_info.as_contract(); self.storage_meter.absorb( @@ -1026,7 +1027,7 @@ where // If it is a delegate call, then we've already transferred tokens in the // last non-delegate frame. if frame.delegate_caller.is_some() { - return Ok(()) + return Ok(()); } let value = frame.value_transferred; @@ -1106,7 +1107,7 @@ where let try_call = || { if !self.allows_reentry(&to) { - return Err(>::ReentranceDenied.into()) + return Err(>::ReentranceDenied.into()); } // We ignore instantiate frames in our search for a cached contract. // Otherwise it would be possible to recursively call a contract from its own @@ -1183,7 +1184,7 @@ where fn terminate(&mut self, beneficiary: &AccountIdOf) -> Result<(), DispatchError> { if self.is_recursive() { - return Err(Error::::TerminatedWhileReentrant.into()) + return Err(Error::::TerminatedWhileReentrant.into()); } let frame = self.top_frame_mut(); let info = frame.terminate(); @@ -1367,7 +1368,7 @@ where fn set_code_hash(&mut self, hash: CodeHash) -> Result<(), DispatchError> { let frame = top_frame_mut!(self); if !E::from_storage(hash, self.schedule, &mut frame.nested_gas)?.is_deterministic() { - return Err(>::Indeterministic.into()) + return Err(>::Indeterministic.into()); } E::add_user(hash)?; let prev_hash = frame.contract_info().code_hash; diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 52fb0190ba3a9..9fb954bd7a8b9 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -516,7 +516,7 @@ pub mod pallet { let contract = if let Some(contract) = contract { contract } else { - return Err(>::ContractNotFound.into()) + return 
Err(>::ContractNotFound.into()); }; >::add_user(code_hash)?; >::remove_user(contract.code_hash); @@ -1085,12 +1085,13 @@ where let mut gas_meter = GasMeter::new(gas_limit); let mut storage_meter = match StorageMeter::new(&origin, storage_deposit_limit, value) { Ok(meter) => meter, - Err(err) => + Err(err) => { return InternalCallOutput { result: Err(err.into()), gas_meter, storage_deposit: Default::default(), - }, + } + }, }; let schedule = T::Schedule::get(); let result = ExecStack::>::run_call( diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index aa04d8b9b1084..bb673b2e7b1c2 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -420,7 +420,7 @@ mod post_checks { pub fn post_upgrade(old_version: StorageVersion) -> Result<(), &'static str> { if old_version < 7 { - return Ok(()) + return Ok(()); } if old_version < 8 { diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index 52cb7698d6952..e7c298a8bba42 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -709,25 +709,25 @@ impl<'a, T: Config> gas_metering::Rules for ScheduleRules<'a, T> { let weight = match *instruction { End | Unreachable | Return | Else => 0, I32Const(_) | I64Const(_) | Block(_) | Loop(_) | Nop | Drop => w.i64const, - I32Load(_, _) | - I32Load8S(_, _) | - I32Load8U(_, _) | - I32Load16S(_, _) | - I32Load16U(_, _) | - I64Load(_, _) | - I64Load8S(_, _) | - I64Load8U(_, _) | - I64Load16S(_, _) | - I64Load16U(_, _) | - I64Load32S(_, _) | - I64Load32U(_, _) => w.i64load, - I32Store(_, _) | - I32Store8(_, _) | - I32Store16(_, _) | - I64Store(_, _) | - I64Store8(_, _) | - I64Store16(_, _) | - I64Store32(_, _) => w.i64store, + I32Load(_, _) + | I32Load8S(_, _) + | I32Load8U(_, _) + | I32Load16S(_, _) + | I32Load16U(_, _) + | I64Load(_, _) + | I64Load8S(_, _) + | I64Load8U(_, _) + | I64Load16S(_, _) + | I64Load16U(_, _) + | I64Load32S(_, _) + | I64Load32U(_, _) => w.i64load, + 
I32Store(_, _) + | I32Store8(_, _) + | I32Store16(_, _) + | I64Store(_, _) + | I64Store8(_, _) + | I64Store16(_, _) + | I64Store32(_, _) => w.i64store, Select => w.select, If(_) => w.r#if, Br(_) => w.br, @@ -780,8 +780,9 @@ impl<'a, T: Config> gas_metering::Rules for ScheduleRules<'a, T> { // Returning None makes the gas instrumentation fail which we intend for // unsupported or unknown instructions. Offchain we might allow indeterminism and hence // use the fallback weight for those instructions. - _ if matches!(self.determinism, Determinism::AllowIndeterminism) && w.fallback > 0 => - w.fallback, + _ if matches!(self.determinism, Determinism::AllowIndeterminism) && w.fallback > 0 => { + w.fallback + }, _ => return None, }; Some(weight) diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index c7644e696196f..7834fa8ba2a5e 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -182,12 +182,13 @@ where if let Some(storage_meter) = storage_meter { let mut diff = meter::Diff::default(); match (old_len, new_value.as_ref().map(|v| v.len() as u32)) { - (Some(old_len), Some(new_len)) => + (Some(old_len), Some(new_len)) => { if new_len > old_len { diff.bytes_added = new_len - old_len; } else { diff.bytes_removed = old_len - new_len; - }, + } + }, (None, Some(new_len)) => { diff.bytes_added = new_len; diff.items_added = 1; @@ -223,7 +224,7 @@ where code_hash: CodeHash, ) -> Result, DispatchError> { if >::contains_key(account) { - return Err(Error::::DuplicateContract.into()) + return Err(Error::::DuplicateContract.into()); } let contract = ContractInfo:: { @@ -252,10 +253,10 @@ where /// and weight limit. 
pub fn deletion_budget(queue_len: usize, weight_limit: Weight) -> (u64, u32) { let base_weight = T::WeightInfo::on_process_deletion_queue_batch(); - let weight_per_queue_item = T::WeightInfo::on_initialize_per_queue_item(1) - - T::WeightInfo::on_initialize_per_queue_item(0); - let weight_per_key = (T::WeightInfo::on_initialize_per_trie_key(1) - - T::WeightInfo::on_initialize_per_trie_key(0)) + let weight_per_queue_item = T::WeightInfo::on_initialize_per_queue_item(1) + - T::WeightInfo::on_initialize_per_queue_item(0); + let weight_per_key = (T::WeightInfo::on_initialize_per_trie_key(1) + - T::WeightInfo::on_initialize_per_trie_key(0)) .ref_time(); let decoding_weight = weight_per_queue_item.saturating_mul(queue_len as u64); @@ -277,7 +278,7 @@ where pub fn process_deletion_queue_batch(weight_limit: Weight) -> Weight { let queue_len = >::decode_len().unwrap_or(0); if queue_len == 0 { - return Weight::zero() + return Weight::zero(); } let (weight_per_key, mut remaining_key_budget) = @@ -287,7 +288,7 @@ where // proceeding. Too little weight for decoding might happen during runtime upgrades // which consume the whole block before the other `on_initialize` blocks are called. 
if remaining_key_budget == 0 { - return weight_limit + return weight_limit; } let mut queue = >::get(); diff --git a/frame/contracts/src/storage/meter.rs b/frame/contracts/src/storage/meter.rs index 0a63eb42b86cb..3b1f52eeb15c3 100644 --- a/frame/contracts/src/storage/meter.rs +++ b/frame/contracts/src/storage/meter.rs @@ -159,7 +159,7 @@ impl Diff { } else { debug_assert_eq!(self.bytes_removed, 0); debug_assert_eq!(self.items_removed, 0); - return bytes_deposit.saturating_add(&items_deposit) + return bytes_deposit.saturating_add(&items_deposit); }; // Refunds are calculated pro rata based on the accumulated storage within the contract @@ -182,16 +182,20 @@ impl Diff { info.storage_items = info.storage_items.saturating_add(items_added).saturating_sub(items_removed); match &bytes_deposit { - Deposit::Charge(amount) => - info.storage_byte_deposit = info.storage_byte_deposit.saturating_add(*amount), - Deposit::Refund(amount) => - info.storage_byte_deposit = info.storage_byte_deposit.saturating_sub(*amount), + Deposit::Charge(amount) => { + info.storage_byte_deposit = info.storage_byte_deposit.saturating_add(*amount) + }, + Deposit::Refund(amount) => { + info.storage_byte_deposit = info.storage_byte_deposit.saturating_sub(*amount) + }, } match &items_deposit { - Deposit::Charge(amount) => - info.storage_item_deposit = info.storage_item_deposit.saturating_add(*amount), - Deposit::Refund(amount) => - info.storage_item_deposit = info.storage_item_deposit.saturating_sub(*amount), + Deposit::Charge(amount) => { + info.storage_item_deposit = info.storage_item_deposit.saturating_add(*amount) + }, + Deposit::Refund(amount) => { + info.storage_item_deposit = info.storage_item_deposit.saturating_sub(*amount) + }, } bytes_deposit.saturating_add(&items_deposit) @@ -391,7 +395,7 @@ where // contract's account into existence. 
deposit = deposit.max(Deposit::Charge(Pallet::::min_balance())); if deposit.charge_or_zero() > self.limit { - return Err(>::StorageDepositLimitExhausted.into()) + return Err(>::StorageDepositLimitExhausted.into()); } // We do not increase `own_contribution` because this will be charged later when the @@ -434,7 +438,7 @@ where } if let Deposit::Charge(amount) = total_deposit { if amount > self.limit { - return Err(>::StorageDepositLimitExhausted.into()) + return Err(>::StorageDepositLimitExhausted.into()); } } Ok(()) @@ -454,8 +458,8 @@ where let max = T::Currency::reducible_balance(origin, true).saturating_sub(min_leftover); let limit = limit.unwrap_or(max); ensure!( - limit <= max && - matches!(T::Currency::can_withdraw(origin, limit), WithdrawConsequence::Success), + limit <= max + && matches!(T::Currency::can_withdraw(origin, limit), WithdrawConsequence::Success), >::StorageDepositNotEnoughFunds, ); Ok(limit) diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 7054ceb07a6fc..0ee52059bc518 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -2514,8 +2514,8 @@ fn reinstrument_does_charge() { assert!(result2.gas_consumed.ref_time() > result1.gas_consumed.ref_time()); assert_eq!( result2.gas_consumed.ref_time(), - result1.gas_consumed.ref_time() + - ::WeightInfo::reinstrument(code_len).ref_time(), + result1.gas_consumed.ref_time() + + ::WeightInfo::reinstrument(code_len).ref_time(), ); }); } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index ba0a0abf11302..71cd9916bd006 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -473,8 +473,9 @@ mod tests { let entry = self.storage.entry(key.clone()); let result = match (entry, take_old) { (Entry::Vacant(_), _) => WriteOutcome::New, - (Entry::Occupied(entry), false) => - WriteOutcome::Overwritten(entry.remove().len() as u32), + (Entry::Occupied(entry), false) => { + 
WriteOutcome::Overwritten(entry.remove().len() as u32) + }, (Entry::Occupied(entry), true) => WriteOutcome::Taken(entry.remove()), }; if let Some(value) = value { @@ -492,8 +493,9 @@ mod tests { let entry = self.storage.entry(key.clone()); let result = match (entry, take_old) { (Entry::Vacant(_), _) => WriteOutcome::New, - (Entry::Occupied(entry), false) => - WriteOutcome::Overwritten(entry.remove().len() as u32), + (Entry::Occupied(entry), false) => { + WriteOutcome::Overwritten(entry.remove().len() as u32) + }, (Entry::Occupied(entry), true) => WriteOutcome::Taken(entry.remove()), }; if let Some(value) = value { diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 3e6b9eee96680..0243036d182e4 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -68,7 +68,7 @@ impl<'a, T: Config> ContractModule<'a, T> { /// we reject such a module. fn ensure_no_internal_memory(&self) -> Result<(), &'static str> { if self.module.memory_section().map_or(false, |ms| ms.entries().len() > 0) { - return Err("module declares internal memory") + return Err("module declares internal memory"); } Ok(()) } @@ -79,13 +79,13 @@ impl<'a, T: Config> ContractModule<'a, T> { // In Wasm MVP spec, there may be at most one table declared. Double check this // explicitly just in case the Wasm version changes. if table_section.entries().len() > 1 { - return Err("multiple tables declared") + return Err("multiple tables declared"); } if let Some(table_type) = table_section.entries().first() { // Check the table's initial size as there is no instruction or environment function // capable of growing the table. 
if table_type.limits().initial() > limit { - return Err("table exceeds maximum size allowed") + return Err("table exceeds maximum size allowed"); } } } @@ -97,13 +97,13 @@ impl<'a, T: Config> ContractModule<'a, T> { let code_section = if let Some(type_section) = self.module.code_section() { type_section } else { - return Ok(()) + return Ok(()); }; for instr in code_section.bodies().iter().flat_map(|body| body.code().elements()) { use self::elements::Instruction::BrTable; if let BrTable(table) = instr { if table.table.len() > limit as usize { - return Err("BrTable's immediate value is too big.") + return Err("BrTable's immediate value is too big."); } } } @@ -113,7 +113,7 @@ impl<'a, T: Config> ContractModule<'a, T> { fn ensure_global_variable_limit(&self, limit: u32) -> Result<(), &'static str> { if let Some(global_section) = self.module.global_section() { if global_section.entries().len() > limit as usize { - return Err("module declares too many globals") + return Err("module declares too many globals"); } } Ok(()) @@ -124,8 +124,9 @@ impl<'a, T: Config> ContractModule<'a, T> { if let Some(global_section) = self.module.global_section() { for global in global_section.entries() { match global.global_type().content_type() { - ValueType::F32 | ValueType::F64 => - return Err("use of floating point type in globals is forbidden"), + ValueType::F32 | ValueType::F64 => { + return Err("use of floating point type in globals is forbidden") + }, _ => {}, } } @@ -135,8 +136,9 @@ impl<'a, T: Config> ContractModule<'a, T> { for func_body in code_section.bodies() { for local in func_body.locals() { match local.value_type() { - ValueType::F32 | ValueType::F64 => - return Err("use of floating point type in locals is forbidden"), + ValueType::F32 | ValueType::F64 => { + return Err("use of floating point type in locals is forbidden") + }, _ => {}, } } @@ -150,10 +152,11 @@ impl<'a, T: Config> ContractModule<'a, T> { let return_type = func_type.results().get(0); for value_type in 
func_type.params().iter().chain(return_type) { match value_type { - ValueType::F32 | ValueType::F64 => + ValueType::F32 | ValueType::F64 => { return Err( "use of floating point type in function types is forbidden", - ), + ) + }, _ => {}, } } @@ -170,12 +173,12 @@ impl<'a, T: Config> ContractModule<'a, T> { let type_section = if let Some(type_section) = self.module.type_section() { type_section } else { - return Ok(()) + return Ok(()); }; for Type::Function(func) in type_section.types() { if func.params().len() > limit as usize { - return Err("Use of a function type with too many parameters.") + return Err("Use of a function type with too many parameters."); } } @@ -247,7 +250,7 @@ impl<'a, T: Config> ContractModule<'a, T> { Some(fn_idx) => fn_idx, None => { // Underflow here means fn_idx points to imported function which we don't allow! - return Err("entry point points to an imported function") + return Err("entry point points to an imported function"); }, }; @@ -260,18 +263,18 @@ impl<'a, T: Config> ContractModule<'a, T> { .type_ref(); let Type::Function(ref func_ty) = types.get(func_ty_idx as usize).ok_or("function has a non-existent type")?; - if !(func_ty.params().is_empty() && - (func_ty.results().is_empty() || func_ty.results() == [ValueType::I32])) + if !(func_ty.params().is_empty() + && (func_ty.results().is_empty() || func_ty.results() == [ValueType::I32])) { - return Err("entry point has wrong signature") + return Err("entry point has wrong signature"); } } if !deploy_found { - return Err("deploy function isn't exported") + return Err("deploy function isn't exported"); } if !call_found { - return Err("call function isn't exported") + return Err("call function isn't exported"); } Ok(()) @@ -302,16 +305,16 @@ impl<'a, T: Config> ContractModule<'a, T> { External::Function(ref type_idx) => type_idx, External::Memory(ref memory_type) => { if import.module() != IMPORT_MODULE_MEMORY { - return Err("Invalid module for imported memory") + return Err("Invalid 
module for imported memory"); } if import.field() != "memory" { - return Err("Memory import must have the field name 'memory'") + return Err("Memory import must have the field name 'memory'"); } if imported_mem_type.is_some() { - return Err("Multiple memory imports defined") + return Err("Multiple memory imports defined"); } imported_mem_type = Some(memory_type); - continue + continue; }, }; @@ -319,16 +322,16 @@ impl<'a, T: Config> ContractModule<'a, T> { .get(*type_idx as usize) .ok_or("validation: import entry points to a non-existent type")?; - if !T::ChainExtension::enabled() && - import.field().as_bytes() == b"seal_call_chain_extension" + if !T::ChainExtension::enabled() + && import.field().as_bytes() == b"seal_call_chain_extension" { - return Err("module uses chain extensions but chain extensions are disabled") + return Err("module uses chain extensions but chain extensions are disabled"); } - if import_fn_banlist.iter().any(|f| import.field().as_bytes() == *f) || - !C::can_satisfy(import.module().as_bytes(), import.field().as_bytes(), func_ty) + if import_fn_banlist.iter().any(|f| import.field().as_bytes() == *f) + || !C::can_satisfy(import.module().as_bytes(), import.field().as_bytes(), func_ty) { - return Err("module imports a non-existent function") + return Err("module imports a non-existent function"); } } Ok(imported_mem_type) @@ -347,10 +350,12 @@ fn get_memory_limits( // Inspect the module to extract the initial and maximum page count. 
let limits = memory_type.limits(); match (limits.initial(), limits.maximum()) { - (initial, Some(maximum)) if initial > maximum => - Err("Requested initial number of pages should not exceed the requested maximum"), - (_, Some(maximum)) if maximum > schedule.limits.memory_pages => - Err("Maximum number of pages should not exceed the configured maximum."), + (initial, Some(maximum)) if initial > maximum => { + Err("Requested initial number of pages should not exceed the requested maximum") + }, + (_, Some(maximum)) if maximum > schedule.limits.memory_pages => { + Err("Maximum number of pages should not exceed the configured maximum.") + }, (initial, Some(maximum)) => Ok((initial, maximum)), (_, None) => { // Maximum number of pages should be always declared. diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 3dac03cac625b..ebee71522a01a 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -302,8 +302,9 @@ impl RuntimeCosts { ContainsStorage(len) => s .contains_storage .saturating_add(s.contains_storage_per_byte.saturating_mul(len.into())), - GetStorage(len) => - s.get_storage.saturating_add(s.get_storage_per_byte.saturating_mul(len.into())), + GetStorage(len) => { + s.get_storage.saturating_add(s.get_storage_per_byte.saturating_mul(len.into())) + }, #[cfg(feature = "unstable-interface")] TakeStorage(len) => s .take_storage @@ -495,10 +496,11 @@ where ReturnFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?; Ok(ExecReturnValue { flags, data }) }, - TrapReason::Termination => - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), + TrapReason::Termination => { + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + }, TrapReason::SupervisorError(error) => return Err(error.into()), - } + }; } // Check the exact type of the error. @@ -513,8 +515,9 @@ where // a trap for now. Eventually, we might want to revisit this. 
Err(sp_sandbox::Error::Module) => return Err("validation error".into()), // Any other kind of a trap should result in a failure. - Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => - return Err(Error::::ContractTrapped.into()), + Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => { + return Err(Error::::ContractTrapped.into()) + }, } } @@ -644,14 +647,14 @@ where create_token: impl FnOnce(u32) -> Option, ) -> Result<(), DispatchError> { if allow_skip && out_ptr == SENTINEL { - return Ok(()) + return Ok(()); } let buf_len = buf.len() as u32; let len: u32 = self.read_sandbox_memory_as(out_len_ptr)?; if len < buf_len { - return Err(Error::::OutputBufferTooSmall.into()) + return Err(Error::::OutputBufferTooSmall.into()); } if let Some(costs) = create_token(buf_len) { @@ -749,7 +752,7 @@ where let charged = self .charge_gas(RuntimeCosts::SetStorage { new_bytes: value_len, old_bytes: max_size })?; if value_len > max_size { - return Err(Error::::ValueTooLarge.into()) + return Err(Error::::ValueTooLarge.into()); } let key = self.read_sandbox_memory(key_ptr, key_type.len::()?)?; let value = Some(self.read_sandbox_memory(value_ptr, value_len)?); @@ -876,7 +879,7 @@ where }, CallType::DelegateCall { code_hash_ptr } => { if flags.contains(CallFlags::ALLOW_REENTRY) { - return Err(Error::::InvalidCallFlags.into()) + return Err(Error::::InvalidCallFlags.into()); } let code_hash = self.read_sandbox_memory_as(code_hash_ptr)?; self.ext.delegate_call(code_hash, input_data) @@ -890,7 +893,7 @@ where return Err(TrapReason::Return(ReturnData { flags: return_value.flags.bits(), data: return_value.data, - })) + })); } } @@ -1819,7 +1822,7 @@ pub mod env { ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Random)?; if subject_len > ctx.ext.schedule().limits.subject_len { - return Err(Error::::RandomSubjectTooLong.into()) + return Err(Error::::RandomSubjectTooLong.into()); } let subject_buf = ctx.read_sandbox_memory(subject_ptr, 
subject_len)?; Ok(ctx.write_sandbox_output( @@ -1863,7 +1866,7 @@ pub mod env { ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Random)?; if subject_len > ctx.ext.schedule().limits.subject_len { - return Err(Error::::RandomSubjectTooLong.into()) + return Err(Error::::RandomSubjectTooLong.into()); } let subject_buf = ctx.read_sandbox_memory(subject_ptr, subject_len)?; Ok(ctx.write_sandbox_output( @@ -2001,7 +2004,7 @@ pub mod env { .ok_or("Zero sized topics are not allowed")?; ctx.charge_gas(RuntimeCosts::DepositEvent { num_topic, len: data_len })?; if data_len > ctx.ext.max_value_size() { - return Err(Error::::ValueTooLarge.into()) + return Err(Error::::ValueTooLarge.into()); } let mut topics: Vec::T>> = match topics_len { @@ -2011,14 +2014,14 @@ pub mod env { // If there are more than `event_topics`, then trap. if topics.len() > ctx.ext.schedule().limits.event_topics as usize { - return Err(Error::::TooManyTopics.into()) + return Err(Error::::TooManyTopics.into()); } // Check for duplicate topics. If there are any, then trap. // Complexity O(n * log(n)) and no additional allocations. // This also sorts the topics. if has_duplicates(&mut topics) { - return Err(Error::::DuplicateTopics.into()) + return Err(Error::::DuplicateTopics.into()); } let event_data = ctx.read_sandbox_memory(data_ptr, data_len)?; @@ -2228,7 +2231,7 @@ pub mod env { ) -> Result { use crate::chain_extension::{ChainExtension, Environment, RetVal}; if !::ChainExtension::enabled() { - return Err(Error::::NoChainExtension.into()) + return Err(Error::::NoChainExtension.into()); } let mut chain_extension = ctx.chain_extension.take().expect( "Constructor initializes with `Some`. This is the only place where it is set to `None`.\ @@ -2237,8 +2240,9 @@ pub mod env { let env = Environment::new(ctx, id, input_ptr, input_len, output_ptr, output_len_ptr); let ret = match chain_extension.call(env)? 
{ RetVal::Converging(val) => Ok(val), - RetVal::Diverging { flags, data } => - Err(TrapReason::Return(ReturnData { flags: flags.bits(), data })), + RetVal::Diverging { flags, data } => { + Err(TrapReason::Return(ReturnData { flags: flags.bits(), data })) + }, }; ctx.chain_extension = Some(chain_extension); ret @@ -2273,7 +2277,7 @@ pub mod env { let msg = core::str::from_utf8(&data).map_err(|_| >::DebugMessageInvalidUTF8)?; ctx.ext.append_debug_buffer(msg); - return Ok(ReturnCode::Success) + return Ok(ReturnCode::Success); } Ok(ReturnCode::LoggingDisabled) } diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index 3ecc6e56be94e..708fc413d7bcb 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -409,7 +409,7 @@ impl, I: 'static> Pallet { tally.increase(approve, *delegations); } } else { - return Err(Error::::AlreadyDelegating.into()) + return Err(Error::::AlreadyDelegating.into()); } // Extend the lock to `balance` (rather than setting it) since we don't know what // other votes are in place. @@ -557,8 +557,9 @@ impl, I: 'static> Pallet { }), ); match old { - Voting::Delegating(Delegating { .. }) => - return Err(Error::::AlreadyDelegating.into()), + Voting::Delegating(Delegating { .. }) => { + return Err(Error::::AlreadyDelegating.into()) + }, Voting::Casting(Casting { votes, delegations, prior }) => { // here we just ensure that we're currently idling with no votes recorded. 
ensure!(votes.is_empty(), Error::::AlreadyVoting); diff --git a/frame/conviction-voting/src/tests.rs b/frame/conviction-voting/src/tests.rs index 7a3f80442014a..ed65fdf8fa03b 100644 --- a/frame/conviction-voting/src/tests.rs +++ b/frame/conviction-voting/src/tests.rs @@ -139,8 +139,9 @@ impl Polling> for TestPolls { let mut polls = Polls::get(); let entry = polls.get_mut(&index); let r = match entry { - Some(Ongoing(ref mut tally_mut_ref, class)) => - f(PollStatus::Ongoing(tally_mut_ref, *class)), + Some(Ongoing(ref mut tally_mut_ref, class)) => { + f(PollStatus::Ongoing(tally_mut_ref, *class)) + }, Some(Completed(when, succeeded)) => f(PollStatus::Completed(*when, *succeeded)), None => f(PollStatus::None), }; @@ -154,8 +155,9 @@ impl Polling> for TestPolls { let mut polls = Polls::get(); let entry = polls.get_mut(&index); let r = match entry { - Some(Ongoing(ref mut tally_mut_ref, class)) => - f(PollStatus::Ongoing(tally_mut_ref, *class)), + Some(Ongoing(ref mut tally_mut_ref, class)) => { + f(PollStatus::Ongoing(tally_mut_ref, *class)) + }, Some(Completed(when, succeeded)) => f(PollStatus::Completed(*when, *succeeded)), None => f(PollStatus::None), }?; diff --git a/frame/conviction-voting/src/vote.rs b/frame/conviction-voting/src/vote.rs index a8e012b6c97a1..f8b9e57bdcda5 100644 --- a/frame/conviction-voting/src/vote.rs +++ b/frame/conviction-voting/src/vote.rs @@ -83,8 +83,9 @@ impl AccountVote { // winning side: can only be removed after the lock period ends. match self { AccountVote::Standard { vote: Vote { conviction: Conviction::None, .. }, .. } => None, - AccountVote::Standard { vote, balance } if vote.aye == approved => - Some((vote.conviction.lock_periods(), balance)), + AccountVote::Standard { vote, balance } if vote.aye == approved => { + Some((vote.conviction.lock_periods(), balance)) + }, _ => None, } } @@ -236,8 +237,9 @@ where /// The amount of this account's balance that must currently be locked due to voting. 
pub fn locked_balance(&self) -> Balance { match self { - Voting::Casting(Casting { votes, prior, .. }) => - votes.iter().map(|i| i.1.balance()).fold(prior.locked(), |a, i| a.max(i)), + Voting::Casting(Casting { votes, prior, .. }) => { + votes.iter().map(|i| i.1.balance()).fold(prior.locked(), |a, i| a.max(i)) + }, Voting::Delegating(Delegating { balance, prior, .. }) => *balance.max(&prior.locked()), } } @@ -248,10 +250,12 @@ where prior: PriorLock, ) { let (d, p) = match self { - Voting::Casting(Casting { ref mut delegations, ref mut prior, .. }) => - (delegations, prior), - Voting::Delegating(Delegating { ref mut delegations, ref mut prior, .. }) => - (delegations, prior), + Voting::Casting(Casting { ref mut delegations, ref mut prior, .. }) => { + (delegations, prior) + }, + Voting::Delegating(Delegating { ref mut delegations, ref mut prior, .. }) => { + (delegations, prior) + }, }; *d = delegations; *p = prior; diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index cf954d4800eee..7626fcd362b11 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -790,7 +790,7 @@ pub mod pallet { if let Some((ext_proposal, _)) = NextExternal::::get() { ensure!(proposal_hash == ext_proposal.hash(), Error::::ProposalMissing); } else { - return Err(Error::::NoProposal.into()) + return Err(Error::::NoProposal.into()); } let mut existing_vetoers = @@ -1422,7 +1422,7 @@ impl Pallet { ); Ok(()) } else { - return Err(Error::::NoneWaiting.into()) + return Err(Error::::NoneWaiting.into()); } } @@ -1451,7 +1451,7 @@ impl Pallet { } Ok(()) } else { - return Err(Error::::NoneWaiting.into()) + return Err(Error::::NoneWaiting.into()); } } @@ -1541,8 +1541,8 @@ impl Pallet { // of unbaked referendum is bounded by this number. In case those number have changed in a // runtime upgrade the formula should be adjusted but the bound should still be sensible. 
>::mutate(|ref_index| { - while *ref_index < last && - Self::referendum_info(*ref_index) + while *ref_index < last + && Self::referendum_info(*ref_index) .map_or(true, |info| matches!(info, ReferendumInfo::Finished { .. })) { *ref_index += 1 diff --git a/frame/democracy/src/migrations.rs b/frame/democracy/src/migrations.rs index 3ec249c1d981c..09754c65fea09 100644 --- a/frame/democracy/src/migrations.rs +++ b/frame/democracy/src/migrations.rs @@ -83,7 +83,7 @@ pub mod v1 { "skipping on_runtime_upgrade: executed on wrong storage version.\ Expected version 0" ); - return weight + return weight; } ReferendumInfoOf::::translate( @@ -91,16 +91,18 @@ pub mod v1 { weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); log::info!(target: TARGET, "migrating referendum #{:?}", &index); Some(match old { - ReferendumInfo::Ongoing(status) => + ReferendumInfo::Ongoing(status) => { ReferendumInfo::Ongoing(ReferendumStatus { end: status.end, proposal: Bounded::from_legacy_hash(status.proposal), threshold: status.threshold, delay: status.delay, tally: status.tally, - }), - ReferendumInfo::Finished { approved, end } => - ReferendumInfo::Finished { approved, end }, + }) + }, + ReferendumInfo::Finished { approved, end } => { + ReferendumInfo::Finished { approved, end } + }, }) }, ); diff --git a/frame/democracy/src/vote.rs b/frame/democracy/src/vote.rs index 122f54febd8cf..8d6c97fe7d0f8 100644 --- a/frame/democracy/src/vote.rs +++ b/frame/democracy/src/vote.rs @@ -88,8 +88,9 @@ impl AccountVote { pub fn locked_if(self, approved: bool) -> Option<(u32, Balance)> { // winning side: can only be removed after the lock period ends. 
match self { - AccountVote::Standard { vote, balance } if vote.aye == approved => - Some((vote.conviction.lock_periods(), balance)), + AccountVote::Standard { vote, balance } if vote.aye == approved => { + Some((vote.conviction.lock_periods(), balance)) + }, _ => None, } } @@ -205,8 +206,9 @@ impl< /// The amount of this account's balance that much currently be locked due to voting. pub fn locked_balance(&self) -> Balance { match self { - Voting::Direct { votes, prior, .. } => - votes.iter().map(|i| i.1.balance()).fold(prior.locked(), |a, i| a.max(i)), + Voting::Direct { votes, prior, .. } => { + votes.iter().map(|i| i.1.balance()).fold(prior.locked(), |a, i| a.max(i)) + }, Voting::Delegating { balance, prior, .. } => *balance.max(&prior.locked()), } } diff --git a/frame/democracy/src/vote_threshold.rs b/frame/democracy/src/vote_threshold.rs index e8ef91def9820..c48de77bc11fb 100644 --- a/frame/democracy/src/vote_threshold.rs +++ b/frame/democracy/src/vote_threshold.rs @@ -60,18 +60,18 @@ fn compare_rationals< let q1 = n1 / d1; let q2 = n2 / d2; if q1 < q2 { - return true + return true; } if q2 < q1 { - return false + return false; } let r1 = n1 % d1; let r2 = n2 % d2; if r2.is_zero() { - return false + return false; } if r1.is_zero() { - return true + return true; } n1 = d2; n2 = d1; @@ -95,13 +95,15 @@ impl< let sqrt_voters = tally.turnout.integer_sqrt(); let sqrt_electorate = electorate.integer_sqrt(); if sqrt_voters.is_zero() { - return false + return false; } match *self { - VoteThreshold::SuperMajorityApprove => - compare_rationals(tally.nays, sqrt_voters, tally.ayes, sqrt_electorate), - VoteThreshold::SuperMajorityAgainst => - compare_rationals(tally.nays, sqrt_electorate, tally.ayes, sqrt_voters), + VoteThreshold::SuperMajorityApprove => { + compare_rationals(tally.nays, sqrt_voters, tally.ayes, sqrt_electorate) + }, + VoteThreshold::SuperMajorityAgainst => { + compare_rationals(tally.nays, sqrt_electorate, tally.ayes, sqrt_voters) + }, 
VoteThreshold::SimpleMajority => tally.ayes > tally.nays, } } diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index bc19e5143424c..238d359c2d92b 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1405,7 +1405,7 @@ impl Pallet { .map_err(ElectionError::DataProvider)?; if targets.len() > target_limit || voters.len() > voter_limit { - return Err(ElectionError::DataProvider("Snapshot too big for submission.")) + return Err(ElectionError::DataProvider("Snapshot too big for submission.")); } let mut desired_targets = @@ -1531,7 +1531,7 @@ impl Pallet { // Check that all of the targets are valid based on the snapshot. if assignment.distribution.iter().any(|(d, _)| !targets.contains(d)) { - return Err(FeasibilityError::InvalidVote) + return Err(FeasibilityError::InvalidVote); } Ok(()) })?; @@ -2466,8 +2466,8 @@ mod tests { let mut active = 1; while weight_with(active) - .all_lte(::BlockWeights::get().max_block) || - active == all_voters + .all_lte(::BlockWeights::get().max_block) + || active == all_voters { active += 1; } diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 8ab7e5bbf733d..933f3afedfbea 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -366,10 +366,12 @@ impl MinerConfig for Runtime { MockedWeightInfo::Basic => Weight::from_ref_time( (10 as u64).saturating_add((5 as u64).saturating_mul(a as u64)), ), - MockedWeightInfo::Complex => - Weight::from_ref_time((0 * v + 0 * t + 1000 * a + 0 * d) as u64), - MockedWeightInfo::Real => - <() as multi_phase::weights::WeightInfo>::feasibility_check(v, t, a, d), + MockedWeightInfo::Complex => { + Weight::from_ref_time((0 * v + 0 * t + 1000 * a + 0 * d) as u64) + }, + MockedWeightInfo::Real => { + <() as multi_phase::weights::WeightInfo>::feasibility_check(v, t, a, d) 
+ }, } } } @@ -435,10 +437,10 @@ impl ElectionDataProvider for StakingMock { fn electable_targets(maybe_max_len: Option) -> data_provider::Result> { let targets = Targets::get(); - if !DataProviderAllowBadData::get() && - maybe_max_len.map_or(false, |max_len| targets.len() > max_len) + if !DataProviderAllowBadData::get() + && maybe_max_len.map_or(false, |max_len| targets.len() > max_len) { - return Err("Targets too big") + return Err("Targets too big"); } Ok(targets) diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index 9d629ad77fd79..2b92dc9263ad8 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -219,7 +219,7 @@ impl SignedSubmissions { insert: Option<(ElectionScore, T::BlockNumber, u32)>, ) -> Option> { if remove_pos >= self.indices.len() { - return None + return None; } // safe: index was just checked in the line above. @@ -319,7 +319,7 @@ impl SignedSubmissions { // if we haven't improved on the weakest score, don't change anything. 
if !submission.raw_solution.score.strict_threshold_better(weakest_score, threshold) { - return InsertResult::NotInserted + return InsertResult::NotInserted; } self.swap_out_submission( @@ -410,7 +410,7 @@ impl Pallet { weight = weight .saturating_add(T::WeightInfo::finalize_signed_phase_accept_solution()); - break + break; }, Err(_) => { log!(warn, "finalized_signed: invalid signed submission found, slashing."); diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 7340605dfe621..1549be4d80eba 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -109,8 +109,9 @@ fn save_solution(call: &Call) -> Result<(), MinerError> { let storage = StorageValueRef::persistent(OFFCHAIN_CACHED_CALL); match storage.mutate::<_, (), _>(|_| Ok(call.clone())) { Ok(_) => Ok(()), - Err(MutateStorageError::ConcurrentModification(_)) => - Err(MinerError::FailedToStoreSolution), + Err(MutateStorageError::ConcurrentModification(_)) => { + Err(MinerError::FailedToStoreSolution) + }, Err(MutateStorageError::ValueFunctionFailed(_)) => { // this branch should be unreachable according to the definition of // `StorageValueRef::mutate`: that function should only ever `Err` if the closure we @@ -301,8 +302,9 @@ impl Pallet { |maybe_head: Result, _>| { match maybe_head { Ok(Some(head)) if now < head => Err("fork."), - Ok(Some(head)) if now >= head && now <= head + threshold => - Err("recently executed."), + Ok(Some(head)) if now >= head && now <= head + threshold => { + Err("recently executed.") + }, Ok(Some(head)) if now > head + threshold => { // we can run again now. Write the new head. Ok(now) @@ -319,8 +321,9 @@ impl Pallet { // all good Ok(_) => Ok(()), // failed to write. 
- Err(MutateStorageError::ConcurrentModification(_)) => - Err(MinerError::Lock("failed to write to offchain db (concurrent modification).")), + Err(MutateStorageError::ConcurrentModification(_)) => { + Err(MinerError::Lock("failed to write to offchain db (concurrent modification).")) + }, // fork etc. Err(MutateStorageError::ValueFunctionFailed(why)) => Err(MinerError::Lock(why)), } @@ -344,8 +347,8 @@ impl Pallet { // ensure correct number of winners. ensure!( - Self::desired_targets().unwrap_or_default() == - raw_solution.solution.unique_targets().len() as u32, + Self::desired_targets().unwrap_or_default() + == raw_solution.solution.unique_targets().len() as u32, Error::::PreDispatchWrongWinnerCount, ); @@ -536,7 +539,7 @@ impl Miner { // not much we can do if assignments are already empty. if high == low { - return Ok(()) + return Ok(()); } while high - low > 1 { @@ -547,8 +550,8 @@ impl Miner { high = test; } } - let maximum_allowed_voters = if low < assignments.len() && - encoded_size_of(&assignments[..low + 1])? <= max_allowed_length + let maximum_allowed_voters = if low < assignments.len() + && encoded_size_of(&assignments[..low + 1])? 
<= max_allowed_length { low + 1 } else { @@ -560,8 +563,8 @@ impl Miner { encoded_size_of(&assignments[..maximum_allowed_voters]).unwrap() <= max_allowed_length ); debug_assert!(if maximum_allowed_voters < assignments.len() { - encoded_size_of(&assignments[..maximum_allowed_voters + 1]).unwrap() > - max_allowed_length + encoded_size_of(&assignments[..maximum_allowed_voters + 1]).unwrap() + > max_allowed_length } else { true }); @@ -626,7 +629,7 @@ impl Miner { max_weight: Weight, ) -> u32 { if size.voters < 1 { - return size.voters + return size.voters; } let max_voters = size.voters.max(1); diff --git a/frame/election-provider-support/solution-type/src/lib.rs b/frame/election-provider-support/solution-type/src/lib.rs index 0a5c11e76dedb..1cae99affce03 100644 --- a/frame/election-provider-support/solution-type/src/lib.rs +++ b/frame/election-provider-support/solution-type/src/lib.rs @@ -155,10 +155,10 @@ fn check_attributes(input: ParseStream) -> syn::Result { return Err(syn::Error::new_spanned( extra_attr, "compact solution can accept only #[compact]", - )) + )); } if attrs.is_empty() { - return Ok(false) + return Ok(false); } let attr = attrs.pop().expect("attributes vec with len 1 can be popped."); if attr.path.is_ident("compact") { @@ -183,7 +183,7 @@ impl Parse for SolutionDef { let generics: syn::AngleBracketedGenericArguments = input.parse()?; if generics.args.len() != 4 { - return Err(syn_err("Must provide 4 generic args.")) + return Err(syn_err("Must provide 4 generic args.")); } let expected_types = ["VoterIndex", "TargetIndex", "Accuracy", "MaxVoters"]; @@ -267,7 +267,7 @@ mod tests { fn ui_fail() { // Only run the ui tests when `RUN_UI_TESTS` is set. 
if std::env::var("RUN_UI_TESTS").is_err() { - return + return; } let cases = trybuild::TestCases::new(); diff --git a/frame/election-provider-support/solution-type/src/single_page.rs b/frame/election-provider-support/solution-type/src/single_page.rs index a7ccf5085d2b1..d231b2ceb10b1 100644 --- a/frame/election-provider-support/solution-type/src/single_page.rs +++ b/frame/election-provider-support/solution-type/src/single_page.rs @@ -33,7 +33,7 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { } = def; if count <= 2 { - return Err(syn_err("cannot build solution struct with capacity less than 3.")) + return Err(syn_err("cannot build solution struct with capacity less than 3.")); } let single = { diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 38924a18e2f54..844ba6a89a85a 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -387,12 +387,13 @@ pub trait ElectionProviderBase { /// [`Self::MaxWinners`]. 
fn desired_targets_checked() -> data_provider::Result { match Self::DataProvider::desired_targets() { - Ok(desired_targets) => + Ok(desired_targets) => { if desired_targets <= Self::MaxWinners::get() { Ok(desired_targets) } else { Err("desired_targets should not be greater than MaxWinners") - }, + } + }, Err(e) => Err(e), } } diff --git a/frame/election-provider-support/src/mock.rs b/frame/election-provider-support/src/mock.rs index 7c834f06f3cdf..d75fe6250509b 100644 --- a/frame/election-provider-support/src/mock.rs +++ b/frame/election-provider-support/src/mock.rs @@ -118,7 +118,7 @@ pub fn generate_random_votes( // distribute the available stake randomly let stake_distribution = if num_chosen_winners == 0 { - continue + continue; } else { let mut available_stake = 1000; let mut stake_distribution = Vec::with_capacity(num_chosen_winners); diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index 483c402fe249c..b5276205ae647 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -111,7 +111,7 @@ fn elect_with_input_bounds( if desired_targets > T::MaxWinners::get() { // early exit - return Err(Error::TooManyWinners) + return Err(Error::TooManyWinners); } let voters_len = voters.len() as u32; diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 165a8fcab429b..9c7f7c3f0513d 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -905,7 +905,7 @@ impl Pallet { if candidates_and_deposit.len().is_zero() { Self::deposit_event(Event::EmptyTerm); - return T::DbWeight::get().reads(3) + return T::DbWeight::get().reads(3); } // All of the new winners that come out of phragmen will thus have a deposit recorded. @@ -937,7 +937,7 @@ impl Pallet { "Failed to run election. 
Number of voters exceeded", ); Self::deposit_event(Event::ElectionError); - return T::DbWeight::get().reads(3 + max_voters as u64) + return T::DbWeight::get().reads(3 + max_voters as u64); }, } @@ -1044,8 +1044,8 @@ impl Pallet { // All candidates/members/runners-up who are no longer retaining a position as a // seat holder will lose their bond. candidates_and_deposit.iter().for_each(|(c, d)| { - if new_members_ids_sorted.binary_search(c).is_err() && - new_runners_up_ids_sorted.binary_search(c).is_err() + if new_members_ids_sorted.binary_search(c).is_err() + && new_runners_up_ids_sorted.binary_search(c).is_err() { let (imbalance, _) = T::Currency::slash_reserved(c, *d); T::LoserCandidate::on_unbalanced(imbalance); diff --git a/frame/elections-phragmen/src/migrations/v4.rs b/frame/elections-phragmen/src/migrations/v4.rs index 76ef630706c50..4a840a7ab9ce8 100644 --- a/frame/elections-phragmen/src/migrations/v4.rs +++ b/frame/elections-phragmen/src/migrations/v4.rs @@ -38,7 +38,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { target: "runtime::elections-phragmen", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return Weight::zero() + return Weight::zero(); } let storage_version = StorageVersion::get::>(); log::info!( diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index 256529421caae..6d30580ebac17 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -750,7 +750,7 @@ where ) -> TransactionValidity { // if the transaction is too big, just drop it. 
if len > 200 { - return InvalidTransaction::ExhaustsResources.into() + return InvalidTransaction::ExhaustsResources.into(); } // check for `set_dummy` diff --git a/frame/examples/offchain-worker/src/lib.rs b/frame/examples/offchain-worker/src/lib.rs index fdf8b61a01acd..243039a163104 100644 --- a/frame/examples/offchain-worker/src/lib.rs +++ b/frame/examples/offchain-worker/src/lib.rs @@ -199,10 +199,12 @@ pub mod pallet { let should_send = Self::choose_transaction_type(block_number); let res = match should_send { TransactionType::Signed => Self::fetch_price_and_send_signed(), - TransactionType::UnsignedForAny => - Self::fetch_price_and_send_unsigned_for_any_account(block_number), - TransactionType::UnsignedForAll => - Self::fetch_price_and_send_unsigned_for_all_accounts(block_number), + TransactionType::UnsignedForAny => { + Self::fetch_price_and_send_unsigned_for_any_account(block_number) + }, + TransactionType::UnsignedForAll => { + Self::fetch_price_and_send_unsigned_for_all_accounts(block_number) + }, TransactionType::Raw => Self::fetch_price_and_send_raw_unsigned(block_number), TransactionType::None => Ok(()), }; @@ -314,7 +316,7 @@ pub mod pallet { let signature_valid = SignedPayload::::verify::(payload, signature.clone()); if !signature_valid { - return InvalidTransaction::BadProof.into() + return InvalidTransaction::BadProof.into(); } Self::validate_transaction_parameters(&payload.block_number, &payload.price) } else if let Call::submit_price_unsigned { block_number, price: new_price } = call { @@ -391,8 +393,9 @@ impl Pallet { match last_send { // If we already have a value in storage and the block number is recent enough // we avoid sending another transaction at this time. - Ok(Some(block)) if block_number < block + T::GracePeriod::get() => - Err(RECENTLY_SENT), + Ok(Some(block)) if block_number < block + T::GracePeriod::get() => { + Err(RECENTLY_SENT) + }, // In every other case we attempt to acquire the lock and send a transaction. 
_ => Ok(block_number), } @@ -443,7 +446,7 @@ impl Pallet { if !signer.can_sign() { return Err( "No local accounts available. Consider adding one via `author_insertKey` RPC.", - ) + ); } // Make an external HTTP request to fetch the current price. // Note this call will block until response is received. @@ -476,7 +479,7 @@ impl Pallet { // anyway. let next_unsigned_at = >::get(); if next_unsigned_at > block_number { - return Err("Too early to send unsigned transaction") + return Err("Too early to send unsigned transaction"); } // Make an external HTTP request to fetch the current price. @@ -510,7 +513,7 @@ impl Pallet { // anyway. let next_unsigned_at = >::get(); if next_unsigned_at > block_number { - return Err("Too early to send unsigned transaction") + return Err("Too early to send unsigned transaction"); } // Make an external HTTP request to fetch the current price. @@ -540,7 +543,7 @@ impl Pallet { // anyway. let next_unsigned_at = >::get(); if next_unsigned_at > block_number { - return Err("Too early to send unsigned transaction") + return Err("Too early to send unsigned transaction"); } // Make an external HTTP request to fetch the current price. @@ -558,7 +561,7 @@ impl Pallet { ); for (_account_id, result) in transaction_results.into_iter() { if result.is_err() { - return Err("Unable to submit transaction") + return Err("Unable to submit transaction"); } } @@ -594,7 +597,7 @@ impl Pallet { // Let's check the status code before we proceed to reading the response. if response.code != 200 { log::warn!("Unexpected status code: {}", response.code); - return Err(http::Error::Unknown) + return Err(http::Error::Unknown); } // Next we want to fully read the response body and collect it to a vector of bytes. @@ -674,12 +677,12 @@ impl Pallet { // Now let's check if the transaction has any chance to succeed. 
let next_unsigned_at = >::get(); if &next_unsigned_at > block_number { - return InvalidTransaction::Stale.into() + return InvalidTransaction::Stale.into(); } // Let's make sure to reject transactions from the future. let current_block = >::block_number(); if ¤t_block < block_number { - return InvalidTransaction::Future.into() + return InvalidTransaction::Future.into(); } // We prioritize transactions that are more far away from current average. diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index b7884efccf685..ace82366684da 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -386,9 +386,9 @@ where // Check that `parent_hash` is correct. let n = *header.number(); assert!( - n > System::BlockNumber::zero() && - >::block_hash(n - System::BlockNumber::one()) == - *header.parent_hash(), + n > System::BlockNumber::zero() + && >::block_hash(n - System::BlockNumber::one()) + == *header.parent_hash(), "Parent hash should be valid.", ); @@ -895,8 +895,8 @@ mod tests { .assimilate_storage(&mut t) .unwrap(); let xt = TestXt::new(call_transfer(2, 69), sign_extra(1, 0, 0)); - let weight = xt.get_dispatch_info().weight + - ::BlockWeights::get() + let weight = xt.get_dispatch_info().weight + + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; let fee: Balance = @@ -1092,8 +1092,8 @@ mod tests { let mut t = new_test_ext(1); t.execute_with(|| { // Block execution weight + on_initialize weight from custom module - let base_block_weight = Weight::from_ref_time(175) + - ::BlockWeights::get().base_block; + let base_block_weight = Weight::from_ref_time(175) + + ::BlockWeights::get().base_block; Executive::initialize_block(&Header::new( 1, @@ -1111,8 +1111,8 @@ mod tests { assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); // default weight for `TestXt` == encoded length. 
- let extrinsic_weight = Weight::from_ref_time(len as u64) + - ::BlockWeights::get() + let extrinsic_weight = Weight::from_ref_time(len as u64) + + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; assert_eq!( @@ -1183,8 +1183,8 @@ mod tests { RuntimeCall::System(SystemCall::remark { remark: vec![1u8] }), sign_extra(1, 0, 0), ); - let weight = xt.get_dispatch_info().weight + - ::BlockWeights::get() + let weight = xt.get_dispatch_info().weight + + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; let fee: Balance = @@ -1428,9 +1428,10 @@ mod tests { // Weights are recorded correctly assert_eq!( frame_system::Pallet::::block_weight().total(), - custom_runtime_upgrade_weight + - runtime_upgrade_weight + - on_initialize_weight + base_block_weight, + custom_runtime_upgrade_weight + + runtime_upgrade_weight + + on_initialize_weight + + base_block_weight, ); }); } diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index c83054189feb7..e337b2fc13b0e 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -203,7 +203,7 @@ pub mod pallet { impl Hooks for Pallet { fn on_idle(_: T::BlockNumber, remaining_weight: Weight) -> Weight { if remaining_weight.any_lt(T::DbWeight::get().reads(2)) { - return Weight::from_ref_time(0) + return Weight::from_ref_time(0); } Self::do_on_idle(remaining_weight) @@ -323,7 +323,7 @@ pub mod pallet { pub(crate) fn do_on_idle(remaining_weight: Weight) -> Weight { let mut eras_to_check_per_block = ErasToCheckPerBlock::::get(); if eras_to_check_per_block.is_zero() { - return T::DbWeight::get().reads(1) + return T::DbWeight::get().reads(1); } // NOTE: here we're assuming that the number of validators has only ever increased, @@ -340,7 +340,7 @@ pub mod pallet { eras_to_check_per_block.saturating_dec(); if eras_to_check_per_block.is_zero() { log!(debug, "early existing because eras_to_check_per_block is zero"); - return T::DbWeight::get().reads(2) + return 
T::DbWeight::get().reads(2); } } @@ -349,7 +349,7 @@ pub mod pallet { // there is an ongoing election -- we better not do anything. Imagine someone is not // exposed anywhere in the last era, and the snapshot for the election is already // taken. In this time period, we don't want to accidentally unstake them. - return T::DbWeight::get().reads(2) + return T::DbWeight::get().reads(2); } let UnstakeRequest { stashes, mut checked } = match Head::::take().or_else(|| { @@ -367,7 +367,7 @@ pub mod pallet { }) { None => { // There's no `Head` and nothing in the `Queue`, nothing to do here. - return T::DbWeight::get().reads(4) + return T::DbWeight::get().reads(4); }, Some(head) => head, }; @@ -391,8 +391,8 @@ pub mod pallet { let unchecked_eras_to_check = { // get the last available `bonding_duration` eras up to current era in reverse // order. - let total_check_range = (current_era.saturating_sub(bonding_duration)..= - current_era) + let total_check_range = (current_era.saturating_sub(bonding_duration) + ..=current_era) .rev() .collect::>(); debug_assert!( @@ -473,7 +473,7 @@ pub mod pallet { ); match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { - Ok(_) => + Ok(_) => { if stashes.is_empty() { Self::deposit_event(Event::::BatchFinished); } else { @@ -481,7 +481,8 @@ pub mod pallet { Self::deposit_event(Event::::BatchChecked { eras: unchecked_eras_to_check, }); - }, + } + }, Err(_) => { // don't put the head back in -- there is an internal error in the pallet. Self::halt("checked is pruned via retain above") diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index d66f4ba5663d9..17d48c5a0fd5e 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -251,8 +251,8 @@ impl ExtBuilder { (VALIDATOR_PREFIX..VALIDATOR_PREFIX + VALIDATORS_PER_ERA) .map(|v| { // for the sake of sanity, let's register this taker as an actual validator. - let others = (NOMINATOR_PREFIX.. 
- (NOMINATOR_PREFIX + NOMINATORS_PER_VALIDATOR_PER_ERA)) + let others = (NOMINATOR_PREFIX + ..(NOMINATOR_PREFIX + NOMINATORS_PER_VALIDATOR_PER_ERA)) .map(|n| IndividualExposure { who: n, value: 0 as Balance }) .collect::>(); (v, Exposure { total: 0, own: 0, others }) diff --git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs index 28a0f5fd56e67..5bed1eb12dbe6 100644 --- a/frame/gilt/src/lib.rs +++ b/frame/gilt/src/lib.rs @@ -598,7 +598,7 @@ pub mod pallet { QueueTotals::::mutate(|qs| { for duration in (1..=T::QueueCount::get()).rev() { if qs[duration as usize - 1].0 == 0 { - continue + continue; } let queue_index = duration as usize - 1; let expiry = @@ -644,14 +644,14 @@ pub mod pallet { bids_taken += 1; if remaining.is_zero() || bids_taken == max_bids { - break + break; } } queues_hit += 1; qs[queue_index].0 = q.len() as u32; }); if remaining.is_zero() || bids_taken == max_bids { - break + break; } } }); diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index 181d22fba545c..37b5d57faa7e1 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -215,7 +215,7 @@ impl Pallet { "rejecting unsigned report equivocation transaction because it is not local/in-block." ); - return InvalidTransaction::Call.into() + return InvalidTransaction::Call.into(); }, } diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index fe5b9861853bf..23f81193c8d6e 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -465,7 +465,7 @@ impl Pallet { if forced.is_some() { if Self::next_forced().map_or(false, |next| next > scheduled_at) { - return Err(Error::::TooSoon.into()) + return Err(Error::::TooSoon.into()); } // only allow the next forced change when twice the window has passed since @@ -538,7 +538,7 @@ impl Pallet { // validate equivocation proof (check votes are different and // signatures are valid). 
if !sp_finality_grandpa::check_equivocation_proof(equivocation_proof) { - return Err(Error::::InvalidEquivocationProof.into()) + return Err(Error::::InvalidEquivocationProof.into()); } // fetch the current and previous sets last session index. on the @@ -557,12 +557,12 @@ impl Pallet { // check that the session id for the membership proof is within the // bounds of the set id reported in the equivocation. - if session_index > set_id_session_index || - previous_set_id_session_index + if session_index > set_id_session_index + || previous_set_id_session_index .map(|previous_index| session_index <= previous_index) .unwrap_or(false) { - return Err(Error::::InvalidEquivocationProof.into()) + return Err(Error::::InvalidEquivocationProof.into()); } // report to the offences module rewarding the sender. diff --git a/frame/grandpa/src/migrations/v4.rs b/frame/grandpa/src/migrations/v4.rs index 81dbd3bab4b67..3746953e016a3 100644 --- a/frame/grandpa/src/migrations/v4.rs +++ b/frame/grandpa/src/migrations/v4.rs @@ -37,7 +37,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { target: "runtime::afg", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return Weight::zero() + return Weight::zero(); } let storage_version = StorageVersion::get::>(); log::info!( diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 626decd12821e..2de1d46d606d4 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -379,7 +379,7 @@ fn report_equivocation_current_set_works() { // check that the balances of all other validators are left intact. for validator in &validators { if *validator == equivocation_validator_id { - continue + continue; } assert_eq!(Balances::total_balance(validator), 10_000_000); @@ -458,7 +458,7 @@ fn report_equivocation_old_set_works() { // check that the balances of all other validators are left intact. 
for validator in &validators { if *validator == equivocation_validator_id { - continue + continue; } assert_eq!(Balances::total_balance(validator), 10_000_000); diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 3584eb954b399..4629b950dd786 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -45,14 +45,16 @@ fn add_registrars(r: u32) -> Result<(), &'static str> { let registrar_origin = T::RegistrarOrigin::successful_origin(); Identity::::add_registrar(registrar_origin, registrar_lookup)?; Identity::::set_fee(RawOrigin::Signed(registrar.clone()).into(), i, 10u32.into())?; - let fields = - IdentityFields( - IdentityField::Display | - IdentityField::Legal | IdentityField::Web | - IdentityField::Riot | IdentityField::Email | - IdentityField::PgpFingerprint | - IdentityField::Image | IdentityField::Twitter, - ); + let fields = IdentityFields( + IdentityField::Display + | IdentityField::Legal + | IdentityField::Web + | IdentityField::Riot + | IdentityField::Email + | IdentityField::PgpFingerprint + | IdentityField::Image + | IdentityField::Twitter, + ); Identity::::set_fields(RawOrigin::Signed(registrar.clone()).into(), i, fields)?; } diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 95f5a84d8abb7..01cbcca63abe9 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -546,14 +546,16 @@ pub mod pallet { let item = (reg_index, Judgement::FeePaid(registrar.fee)); match id.judgements.binary_search_by_key(®_index, |x| x.0) { - Ok(i) => + Ok(i) => { if id.judgements[i].1.is_sticky() { - return Err(Error::::StickyJudgement.into()) + return Err(Error::::StickyJudgement.into()); } else { id.judgements[i] = item - }, - Err(i) => - id.judgements.try_insert(i, item).map_err(|_| Error::::TooManyRegistrars)?, + } + }, + Err(i) => { + id.judgements.try_insert(i, item).map_err(|_| Error::::TooManyRegistrars)? 
+ }, } T::Currency::reserve(&sender, registrar.fee)?; @@ -606,7 +608,7 @@ pub mod pallet { let fee = if let Judgement::FeePaid(fee) = id.judgements.remove(pos).1 { fee } else { - return Err(Error::::JudgementGiven.into()) + return Err(Error::::JudgementGiven.into()); }; let err_amount = T::Currency::unreserve(&sender, fee); @@ -783,7 +785,7 @@ pub mod pallet { let mut id = >::get(&target).ok_or(Error::::InvalidTarget)?; if T::Hashing::hash_of(&id.info) != identity { - return Err(Error::::JudgementForDifferentIdentity.into()) + return Err(Error::::JudgementForDifferentIdentity.into()); } let item = (reg_index, judgement); diff --git a/frame/identity/src/types.rs b/frame/identity/src/types.rs index b1f15da3b1117..419fd05fd0ea2 100644 --- a/frame/identity/src/types.rs +++ b/frame/identity/src/types.rs @@ -396,8 +396,9 @@ impl< > Registration { pub(crate) fn total_deposit(&self) -> Balance { - self.deposit + - self.judgements + self.deposit + + self + .judgements .iter() .map(|(_, ref j)| if let Judgement::FeePaid(fee) = j { *fee } else { Zero::zero() }) .fold(Zero::zero(), |a, i| a + i) diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 342522ff29b19..80ced4e4ec015 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -552,19 +552,19 @@ pub mod pallet { if let Call::heartbeat { heartbeat, signature } = call { if >::is_online(heartbeat.authority_index) { // we already received a heartbeat for this authority - return InvalidTransaction::Stale.into() + return InvalidTransaction::Stale.into(); } // check if session index from heartbeat is recent let current_session = T::ValidatorSet::session_index(); if heartbeat.session_index != current_session { - return InvalidTransaction::Stale.into() + return InvalidTransaction::Stale.into(); } // verify that the incoming (unverified) pubkey is actually an authority id let keys = Keys::::get(); if keys.len() as u32 != heartbeat.validators_len { - return 
InvalidTransaction::Custom(INVALID_VALIDATORS_LEN).into() + return InvalidTransaction::Custom(INVALID_VALIDATORS_LEN).into(); } let authority_id = match keys.get(heartbeat.authority_index as usize) { Some(id) => id, @@ -577,7 +577,7 @@ pub mod pallet { }); if !signature_valid { - return InvalidTransaction::BadProof.into() + return InvalidTransaction::BadProof.into(); } ValidTransaction::with_tag_prefix("ImOnline") @@ -621,7 +621,7 @@ impl Pallet { let current_validators = T::ValidatorSet::validators(); if authority_index >= current_validators.len() as u32 { - return false + return false; } let authority = ¤t_validators[authority_index as usize]; @@ -632,8 +632,8 @@ impl Pallet { fn is_online_aux(authority_index: AuthIndex, authority: &ValidatorId) -> bool { let current_session = T::ValidatorSet::session_index(); - ReceivedHeartbeats::::contains_key(¤t_session, &authority_index) || - AuthoredBlocks::::get(¤t_session, authority) != 0 + ReceivedHeartbeats::::contains_key(¤t_session, &authority_index) + || AuthoredBlocks::::get(¤t_session, authority) != 0 } /// Returns `true` if a heartbeat has been received for the authority at `authority_index` in @@ -683,8 +683,8 @@ impl Pallet { // haven't sent an heartbeat yet we'll send one unconditionally. the idea is to prevent // all nodes from sending the heartbeats at the same block and causing a temporary (but // deterministic) spike in transactions. 
- progress >= START_HEARTBEAT_FINAL_PERIOD || - progress >= START_HEARTBEAT_RANDOM_PERIOD && random_choice(progress) + progress >= START_HEARTBEAT_FINAL_PERIOD + || progress >= START_HEARTBEAT_RANDOM_PERIOD && random_choice(progress) } else { // otherwise we fallback to using the block number calculated at the beginning // of the session that should roughly correspond to the middle of the session @@ -693,7 +693,7 @@ impl Pallet { }; if !should_heartbeat { - return Err(OffchainErr::TooEarly) + return Err(OffchainErr::TooEarly); } let session_index = T::ValidatorSet::session_index(); @@ -735,7 +735,7 @@ impl Pallet { }; if Self::is_online(authority_index) { - return Err(OffchainErr::AlreadyOnline(authority_index)) + return Err(OffchainErr::AlreadyOnline(authority_index)); } // acquire lock for that authority at current heartbeat to make sure we don't @@ -801,15 +801,16 @@ impl Pallet { // we will re-send it. match status { // we are still waiting for inclusion. - Ok(Some(status)) if status.is_recent(session_index, now) => - Err(OffchainErr::WaitingForInclusion(status.sent_at)), + Ok(Some(status)) if status.is_recent(session_index, now) => { + Err(OffchainErr::WaitingForInclusion(status.sent_at)) + }, // attempt to set new status _ => Ok(HeartbeatStatus { session_index, sent_at: now }), } }, ); if let Err(MutateStorageError::ValueFunctionFailed(err)) = res { - return Err(err) + return Err(err); } let mut new_status = res.map_err(|_| OffchainErr::FailedToAcquireLock)?; diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index 366119278d836..a4db3f104a824 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -137,8 +137,9 @@ fn heartbeat( signature: signature.clone(), }) .map_err(|e| match e { - TransactionValidityError::Invalid(InvalidTransaction::Custom(INVALID_VALIDATORS_LEN)) => - "invalid validators len", + TransactionValidityError::Invalid(InvalidTransaction::Custom(INVALID_VALIDATORS_LEN)) => { + "invalid validators 
len" + }, e @ _ => <&'static str>::from(e), })?; ImOnline::heartbeat(RuntimeOrigin::none(), heartbeat, signature) @@ -240,8 +241,9 @@ fn should_generate_heartbeats() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); let heartbeat = match ex.call { - crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => - heartbeat, + crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => { + heartbeat + }, e => panic!("Unexpected call: {:?}", e), }; @@ -356,8 +358,9 @@ fn should_not_send_a_report_if_already_online() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); let heartbeat = match ex.call { - crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => - heartbeat, + crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => { + heartbeat + }, e => panic!("Unexpected call: {:?}", e), }; diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs index c501a30ef5f4a..5371730cc11f1 100644 --- a/frame/lottery/src/lib.rs +++ b/frame/lottery/src/lib.rs @@ -267,11 +267,11 @@ pub mod pallet { LotteryIndex::::mutate(|index| *index = index.saturating_add(1)); // Set a new start with the current block. config.start = n; - return T::WeightInfo::on_initialize_repeat() + return T::WeightInfo::on_initialize_repeat(); } else { // Else, kill the lottery storage. *lottery = None; - return T::WeightInfo::on_initialize_end() + return T::WeightInfo::on_initialize_end(); } // We choose not need to kill Participants and Tickets to avoid a large // number of writes at one time. 
Instead, data persists between lotteries, @@ -424,7 +424,7 @@ impl Pallet { fn call_to_index(call: &::RuntimeCall) -> Result { let encoded_call = call.encode(); if encoded_call.len() < 2 { - return Err(Error::::EncodingFailed.into()) + return Err(Error::::EncodingFailed.into()); } Ok((encoded_call[0], encoded_call[1])) } @@ -487,14 +487,14 @@ impl Pallet { /// Returns `None` if there are no tickets. fn choose_ticket(total: u32) -> Option { if total == 0 { - return None + return None; } let mut random_number = Self::generate_random_number(0); // Best effort attempt to remove bias from modulus operator. for i in 1..T::MaxGenerateRandom::get() { if random_number < u32::MAX - u32::MAX % total { - break + break; } random_number = Self::generate_random_number(i); diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 4191bbcc5d86e..1971291dec819 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -222,7 +222,7 @@ pub mod pallet { let add = T::Lookup::lookup(add)?; if remove == add { - return Ok(()) + return Ok(()); } let mut members = >::get(); diff --git a/frame/membership/src/migrations/v4.rs b/frame/membership/src/migrations/v4.rs index 5b8735aa2bac9..b72d8e162a2d4 100644 --- a/frame/membership/src/migrations/v4.rs +++ b/frame/membership/src/migrations/v4.rs @@ -46,7 +46,7 @@ pub fn migrate::on_chain_storage_version(); @@ -85,7 +85,7 @@ pub fn pre_migrate>(old_pallet_name: N, new_ log_migration("pre-migration", old_pallet_name, new_pallet_name); if new_pallet_name == old_pallet_name { - return + return; } let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); @@ -113,7 +113,7 @@ pub fn post_migrate>(old_pallet_name: N, new log_migration("post-migration", old_pallet_name, new_pallet_name); if new_pallet_name == old_pallet_name { - return + return; } // Assert that nothing remains at the old prefix. 
diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index 8476d82f3e70d..0fbc807d3d01d 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -233,8 +233,8 @@ where /// Converts an mmr-specific error into a [`CallError`]. fn mmr_error_into_rpc_error(err: MmrError) -> CallError { - let error_code = MMR_ERROR + - match err { + let error_code = MMR_ERROR + + match err { MmrError::LeafNotFound => 1, MmrError::GenerateProof => 2, MmrError::Verify => 3, diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index a2d42417ae5dc..207bfdf7f2dc6 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -227,14 +227,14 @@ pub mod pallet { // MMR push never fails, but better safe than sorry. if mmr.push(data).is_none() { log::error!(target: "runtime::mmr", "MMR push failed"); - return T::WeightInfo::on_initialize(peaks_before) + return T::WeightInfo::on_initialize(peaks_before); } // Update the size, `mmr.finalize()` should also never fail. 
let (leaves, root) = match mmr.finalize() { Ok((leaves, root)) => (leaves, root), Err(e) => { log::error!(target: "runtime::mmr", "MMR finalize failed: {:?}", e); - return T::WeightInfo::on_initialize(peaks_before) + return T::WeightInfo::on_initialize(peaks_before); }, }; >::on_new_root(&root); @@ -428,12 +428,12 @@ impl, I: 'static> Pallet { leaves: Vec>, proof: primitives::Proof<>::Hash>, ) -> Result<(), primitives::Error> { - if proof.leaf_count > Self::mmr_leaves() || - proof.leaf_count == 0 || - (proof.items.len().saturating_add(leaves.len())) as u64 > proof.leaf_count + if proof.leaf_count > Self::mmr_leaves() + || proof.leaf_count == 0 + || (proof.items.len().saturating_add(leaves.len())) as u64 > proof.leaf_count { return Err(primitives::Error::Verify - .log_debug("The proof has incorrect number of leaves or proof items.")) + .log_debug("The proof has incorrect number of leaves or proof items.")); } let mmr: ModuleMmr = mmr::Mmr::new(proof.leaf_count); diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs index 1f5a5bdae380b..7d45ffb9f3eb4 100644 --- a/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -42,7 +42,7 @@ where let size = NodesUtils::new(proof.leaf_count).size(); if leaves.len() != proof.leaf_indices.len() { - return Err(Error::Verify.log_debug("Proof leaf_indices not same length with leaves")) + return Err(Error::Verify.log_debug("Proof leaf_indices not same length with leaves")); } let leaves_and_position_data = proof @@ -103,7 +103,7 @@ where ); if leaves.len() != proof.leaf_indices.len() { - return Err(Error::Verify.log_debug("Proof leaf_indices not same length with leaves")) + return Err(Error::Verify.log_debug("Proof leaf_indices not same length with leaves")); } let leaves_positions_and_data = proof diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs index d16ca8cf1e5c8..297337aaf2d37 100644 
--- a/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -230,7 +230,7 @@ where // change and maybe we mess up storage migration, // return _if and only if_ node is found (in normal conditions it's always found), if let Some(elem) = sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) { - return Ok(codec::Decode::decode(&mut &*elem).ok()) + return Ok(codec::Decode::decode(&mut &*elem).ok()); } // BUT if we DID MESS UP, fall through to searching node using fork-specific key. } @@ -274,7 +274,7 @@ where fn append(&mut self, pos: NodeIndex, elems: Vec>) -> mmr_lib::Result<()> { if elems.is_empty() { - return Ok(()) + return Ok(()); } trace!( @@ -286,7 +286,7 @@ where let size = NodesUtils::new(leaves).size(); if pos != size { - return Err(mmr_lib::Error::InconsistentStore) + return Err(mmr_lib::Error::InconsistentStore); } let new_size = size + elems.len() as NodeIndex; diff --git a/frame/merkle-mountain-range/src/mmr/utils.rs b/frame/merkle-mountain-range/src/mmr/utils.rs index 0b8e88a9283da..6cd9b9c273ec7 100644 --- a/frame/merkle-mountain-range/src/mmr/utils.rs +++ b/frame/merkle-mountain-range/src/mmr/utils.rs @@ -55,7 +55,7 @@ impl NodesUtils { // Translate a _leaf_ `NodeIndex` to its `LeafIndex`. 
fn leaf_node_index_to_leaf_index(pos: NodeIndex) -> LeafIndex { if pos == 0 { - return 0 + return 0; } let peaks = helper::get_peaks(pos); (pos + peaks.len() as u64) >> 1 @@ -73,7 +73,7 @@ impl NodesUtils { pub fn right_branch_ending_in_leaf(leaf_index: LeafIndex) -> crate::Vec { let pos = helper::leaf_index_to_pos(leaf_index); let num_parents = leaf_index.trailing_ones() as u64; - return (pos..=pos + num_parents).collect() + return (pos..=pos + num_parents).collect(); } } diff --git a/frame/multisig/src/migrations.rs b/frame/multisig/src/migrations.rs index 5085297cde433..12087e6f15e8c 100644 --- a/frame/multisig/src/migrations.rs +++ b/frame/multisig/src/migrations.rs @@ -59,7 +59,7 @@ pub mod v1 { if onchain > 0 { log!(info, "MigrateToV1 should be removed"); - return T::DbWeight::get().reads(1) + return T::DbWeight::get().reads(1); } Calls::::drain().for_each(|(_call_hash, (_data, caller, deposit))| { diff --git a/frame/nft-fractionalisation/src/lib.rs b/frame/nft-fractionalisation/src/lib.rs index 5d41ea88319e3..544b3c1a81d47 100644 --- a/frame/nft-fractionalisation/src/lib.rs +++ b/frame/nft-fractionalisation/src/lib.rs @@ -136,8 +136,7 @@ pub mod pallet { match Self::do_lock_nft(origin.clone(), collection_id, item_id) { Err(e) => return Err(e), //Ok(()) => match Self::do_create_asset(origin.clone(), asset_id, admin, min_balance) - Ok(()) => match Self::do_create_asset(asset_id, admin_account_id, min_balance) - { + Ok(()) => match Self::do_create_asset(asset_id, admin_account_id, min_balance) { Err(e) => return Err(e), Ok(()) => match Self::do_mint_asset( // Minting the asset is only possible from the pallet's origin. 
diff --git a/frame/nfts/src/features/attributes.rs b/frame/nfts/src/features/attributes.rs index 85c1e0b302d12..32fff24b92550 100644 --- a/frame/nfts/src/features/attributes.rs +++ b/frame/nfts/src/features/attributes.rs @@ -60,8 +60,8 @@ impl, I: 'static> Pallet { let old_deposit = attribute.map_or(Zero::zero(), |m| m.1); collection_details.total_deposit.saturating_reduce(old_deposit); let mut deposit = Zero::zero(); - if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) && - maybe_check_owner.is_some() + if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) + && maybe_check_owner.is_some() { deposit = T::DepositPerByte::get() .saturating_mul(((key.len() + value.len()) as u32).into()) diff --git a/frame/nfts/src/features/metadata.rs b/frame/nfts/src/features/metadata.rs index 0b0a337197d9b..679a86fd21b00 100644 --- a/frame/nfts/src/features/metadata.rs +++ b/frame/nfts/src/features/metadata.rs @@ -30,8 +30,8 @@ impl, I: 'static> Pallet { let item_config = Self::get_item_config(&collection, &item)?; ensure!( - maybe_check_owner.is_none() || - item_config.is_setting_enabled(ItemSetting::UnlockedMetadata), + maybe_check_owner.is_none() + || item_config.is_setting_enabled(ItemSetting::UnlockedMetadata), Error::::LockedItemMetadata ); @@ -48,8 +48,8 @@ impl, I: 'static> Pallet { let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); collection_details.total_deposit.saturating_reduce(old_deposit); let mut deposit = Zero::zero(); - if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) && - maybe_check_owner.is_some() + if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) + && maybe_check_owner.is_some() { deposit = T::DepositPerByte::get() .saturating_mul(((data.len()) as u32).into()) @@ -108,8 +108,8 @@ impl, I: 'static> Pallet { ) -> DispatchResult { let collection_config = Self::get_collection_config(&collection)?; ensure!( - maybe_check_owner.is_none() || - 
collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), + maybe_check_owner.is_none() + || collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), Error::::LockedCollectionMetadata ); @@ -123,8 +123,8 @@ impl, I: 'static> Pallet { let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); details.total_deposit.saturating_reduce(old_deposit); let mut deposit = Zero::zero(); - if maybe_check_owner.is_some() && - collection_config.is_setting_enabled(CollectionSetting::DepositRequired) + if maybe_check_owner.is_some() + && collection_config.is_setting_enabled(CollectionSetting::DepositRequired) { deposit = T::DepositPerByte::get() .saturating_mul(((data.len()) as u32).into()) @@ -158,8 +158,8 @@ impl, I: 'static> Pallet { let collection_config = Self::get_collection_config(&collection)?; ensure!( - maybe_check_owner.is_none() || - collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), + maybe_check_owner.is_none() + || collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), Error::::LockedCollectionMetadata ); diff --git a/frame/nfts/src/features/settings.rs b/frame/nfts/src/features/settings.rs index 5f408ed183c35..2bf782eac5fe4 100644 --- a/frame/nfts/src/features/settings.rs +++ b/frame/nfts/src/features/settings.rs @@ -98,6 +98,6 @@ impl, I: 'static> Pallet { pub(crate) fn is_pallet_feature_enabled(feature: PalletFeature) -> bool { let features = T::Features::get(); - return features.is_enabled(feature) + return features.is_enabled(feature); } } diff --git a/frame/nfts/src/features/transfer.rs b/frame/nfts/src/features/transfer.rs index 7ebad853902a9..178c06189fa94 100644 --- a/frame/nfts/src/features/transfer.rs +++ b/frame/nfts/src/features/transfer.rs @@ -93,7 +93,7 @@ impl, I: 'static> Pallet { let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; ensure!(origin == details.owner, Error::::NoPermission); if details.owner == owner { - return Ok(()) + return 
Ok(()); } // Move the deposit to the new owner. @@ -143,7 +143,7 @@ impl, I: 'static> Pallet { Collection::::try_mutate(collection, |maybe_details| { let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; if details.owner == owner { - return Ok(()) + return Ok(()); } // Move the deposit to the new owner. diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index b42147e6687d9..32501c6318240 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -84,9 +84,11 @@ impl, I: 'static> Inspect<::AccountId> for Palle ItemConfigOf::::get(collection, item), ) { (Some(cc), Some(ic)) - if cc.is_setting_enabled(CollectionSetting::TransferableItems) && - ic.is_setting_enabled(ItemSetting::Transferable) => - true, + if cc.is_setting_enabled(CollectionSetting::TransferableItems) + && ic.is_setting_enabled(ItemSetting::Transferable) => + { + true + }, _ => false, } } @@ -160,7 +162,7 @@ impl, I: 'static> Mutate<::AccountId, ItemConfig Self::do_burn(*collection, *item, |d| { if let Some(check_owner) = maybe_check_owner { if &d.owner != check_owner { - return Err(Error::::NoPermission.into()) + return Err(Error::::NoPermission.into()); } } Ok(()) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 0f3d3c89c2932..0fae61cd57053 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -727,12 +727,13 @@ pub mod pallet { }, MintType::HolderOf(collection_id) => { let correct_witness = match witness_data { - Some(MintWitness { owner_of_item }) => + Some(MintWitness { owner_of_item }) => { Account::::contains_key(( &caller, &collection_id, &owner_of_item, - )), + )) + }, None => false, }; ensure!(correct_witness, Error::::BadWitness) @@ -911,10 +912,10 @@ pub mod pallet { if T::Currency::reserve(&details.deposit.account, deposit - old).is_err() { // NOTE: No alterations made to collection_details in this iteration so far, // so this is OK to do. 
- continue + continue; } } else { - continue + continue; } details.deposit.amount = deposit; Item::::insert(&collection, &item, &details); diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index b58c81b1d70f8..33fcc3dfe4a0c 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -1606,12 +1606,7 @@ fn claim_swap_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, default_collection_config())); - assert_ok!(Nfts::mint( - RuntimeOrigin::signed(user_1), - collection_id, - item_1, - None, - )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_1, None,)); assert_ok!(Nfts::force_mint( RuntimeOrigin::signed(user_1), collection_id, @@ -1626,12 +1621,7 @@ fn claim_swap_should_work() { user_2, default_item_config(), )); - assert_ok!(Nfts::mint( - RuntimeOrigin::signed(user_1), - collection_id, - item_4, - None, - )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_4, None,)); assert_ok!(Nfts::force_mint( RuntimeOrigin::signed(user_1), collection_id, @@ -1860,9 +1850,9 @@ fn collection_locking_should_work() { let stored_config = CollectionConfigOf::::get(collection_id).unwrap(); let full_lock_config = collection_config_from_disabled_settings( - CollectionSetting::TransferableItems | - CollectionSetting::UnlockedMetadata | - CollectionSetting::UnlockedAttributes, + CollectionSetting::TransferableItems + | CollectionSetting::UnlockedMetadata + | CollectionSetting::UnlockedAttributes, ); assert_eq!(stored_config, full_lock_config); }); diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 638a96eb3321a..bf0ac73ab7d06 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -275,7 +275,7 @@ pub mod pallet { ensure!(add.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); if remove == add { - return Ok(()) + return Ok(()); } let mut nodes = WellKnownNodes::::get(); @@ -394,7 +394,7 @@ 
pub mod pallet { for add_node in connections.iter() { if *add_node == node { - continue + continue; } nodes.insert(add_node.clone()); } diff --git a/frame/nomination-pools/fuzzer/src/call.rs b/frame/nomination-pools/fuzzer/src/call.rs index b07903609e8ab..95a3ad37f34e5 100644 --- a/frame/nomination-pools/fuzzer/src/call.rs +++ b/frame/nomination-pools/fuzzer/src/call.rs @@ -173,7 +173,7 @@ impl RewardAgent { fn join(&mut self) { if self.pool_id.is_some() { - return + return; } let pool_id = LastPoolId::::get(); let amount = 10 * ExistentialDeposit::get(); @@ -189,7 +189,7 @@ impl RewardAgent { // calculated. if !PoolMembers::::contains_key(&self.who) { log!(warn, "reward agent is not in the pool yet, cannot claim"); - return + return; } let pre = Balances::free_balance(&42); let origin = RuntimeOrigin::signed(42); @@ -266,8 +266,8 @@ fn main() { } // execute sanity checks at a fixed interval, possibly on every block. - if iteration % - (std::env::var("SANITY_CHECK_INTERVAL") + if iteration + % (std::env::var("SANITY_CHECK_INTERVAL") .ok() .and_then(|x| x.parse::().ok())) .unwrap_or(1) == 0 diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 9ca9539b3dca8..bce737b655296 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -502,9 +502,10 @@ impl PoolMember { ) -> Result<(), Error> { if let Some(new_points) = self.points.checked_sub(&points_dissolved) { match self.unbonding_eras.get_mut(&unbonding_era) { - Some(already_unbonding_points) => + Some(already_unbonding_points) => { *already_unbonding_points = - already_unbonding_points.saturating_add(points_issued), + already_unbonding_points.saturating_add(points_issued) + }, None => self .unbonding_eras .try_insert(unbonding_era, points_issued) @@ -746,8 +747,8 @@ impl BondedPool { } fn can_nominate(&self, who: &T::AccountId) -> bool { - self.is_root(who) || - self.roles.nominator.as_ref().map_or(false, |nominator| nominator == who) + 
self.is_root(who) + || self.roles.nominator.as_ref().map_or(false, |nominator| nominator == who) } fn can_kick(&self, who: &T::AccountId) -> bool { @@ -840,9 +841,9 @@ impl BondedPool { // any unbond must comply with the balance condition: ensure!( - is_full_unbond || - balance_after_unbond >= - if is_depositor { + is_full_unbond + || balance_after_unbond + >= if is_depositor { Pallet::::depositor_min_bond() } else { MinJoinBond::::get() @@ -874,7 +875,7 @@ impl BondedPool { }, (false, true) => { // the depositor can simply not be unbonded permissionlessly, period. - return Err(Error::::DoesNotHavePermission.into()) + return Err(Error::::DoesNotHavePermission.into()); }, }; @@ -1578,10 +1579,12 @@ pub mod pallet { Self::do_reward_payout(&who, &mut member, &mut bonded_pool, &mut reward_pool)?; let (points_issued, bonded) = match extra { - BondExtra::FreeBalance(amount) => - (bonded_pool.try_bond_funds(&who, amount, BondType::Later)?, amount), - BondExtra::Rewards => - (bonded_pool.try_bond_funds(&who, claimed, BondType::Later)?, claimed), + BondExtra::FreeBalance(amount) => { + (bonded_pool.try_bond_funds(&who, amount, BondType::Later)?, amount) + }, + BondExtra::Rewards => { + (bonded_pool.try_bond_funds(&who, claimed, BondType::Later)?, claimed) + }, }; bonded_pool.ok_to_be_open()?; @@ -2134,7 +2137,7 @@ impl Pallet { let current_reward_counter = reward_pool .current_reward_counter(pool_member.pool_id, bonded_pool.points) .ok()?; - return pool_member.pending_rewards(current_reward_counter).ok() + return pool_member.pending_rewards(current_reward_counter).ok(); } } @@ -2276,7 +2279,7 @@ impl Pallet { let balance = |x| T::U256ToBalance::convert(x); if current_balance.is_zero() || current_points.is_zero() || points.is_zero() { // There is nothing to unbond - return Zero::zero() + return Zero::zero(); } // Equivalent of (current_balance / current_points) * points @@ -2304,7 +2307,7 @@ impl Pallet { let pending_rewards = member.pending_rewards(current_reward_counter)?; 
if pending_rewards.is_zero() { - return Ok(pending_rewards) + return Ok(pending_rewards); } // IFF the reward is non-zero alter the member and reward pool info. @@ -2438,7 +2441,7 @@ impl Pallet { #[cfg(any(feature = "try-runtime", feature = "fuzzing", test, debug_assertions))] pub fn do_try_state(level: u8) -> Result<(), &'static str> { if level.is_zero() { - return Ok(()) + return Ok(()); } // note: while a bit wacky, since they have the same key, even collecting to vec should // result in the same set of keys, in the same order. @@ -2490,8 +2493,8 @@ impl Pallet { RewardPool::::current_balance(id) ); assert!( - RewardPool::::current_balance(id) >= - pools_members_pending_rewards.get(&id).map(|x| *x).unwrap_or_default() + RewardPool::::current_balance(id) + >= pools_members_pending_rewards.get(&id).map(|x| *x).unwrap_or_default() ) }); @@ -2506,8 +2509,8 @@ impl Pallet { let depositor = PoolMembers::::get(&bonded_pool.roles.depositor).unwrap(); assert!( - bonded_pool.is_destroying_and_only_depositor(depositor.active_points()) || - depositor.active_points() >= MinCreateBond::::get(), + bonded_pool.is_destroying_and_only_depositor(depositor.active_points()) + || depositor.active_points() >= MinCreateBond::::get(), "depositor must always have MinCreateBond stake in the pool, except for when the \ pool is being destroyed and the depositor is the last member", ); @@ -2515,7 +2518,7 @@ impl Pallet { assert!(MaxPoolMembers::::get().map_or(true, |max| all_members <= max)); if level <= 1 { - return Ok(()) + return Ok(()); } for (pool_id, _pool) in BondedPools::::iter() { diff --git a/frame/nomination-pools/src/migration.rs b/frame/nomination-pools/src/migration.rs index b73141c95f72c..2af575a378f34 100644 --- a/frame/nomination-pools/src/migration.rs +++ b/frame/nomination-pools/src/migration.rs @@ -219,14 +219,14 @@ pub mod v2 { Some(x) => x, None => { log!(error, "pool {} has no member! 
deleting it..", id); - return None + return None; }, }; let bonded_pool = match BondedPools::::get(id) { Some(x) => x, None => { log!(error, "pool {} has no bonded pool! deleting it..", id); - return None + return None; }, }; @@ -241,7 +241,7 @@ pub mod v2 { Some(x) => x, None => { log!(error, "pool {} for member {:?} does not exist!", id, who); - return None + return None; }, }; @@ -351,8 +351,8 @@ pub mod v2 { // all reward accounts must have more than ED. RewardPools::::iter().for_each(|(id, _)| { assert!( - T::Currency::free_balance(&Pallet::::create_reward_account(id)) >= - T::Currency::minimum_balance() + T::Currency::free_balance(&Pallet::::create_reward_account(id)) + >= T::Currency::minimum_balance() ) }); diff --git a/frame/preimage/src/lib.rs b/frame/preimage/src/lib.rs index 6549832c11f5d..44afd90dca36e 100644 --- a/frame/preimage/src/lib.rs +++ b/frame/preimage/src/lib.rs @@ -206,7 +206,7 @@ impl Pallet { origin: T::RuntimeOrigin, ) -> Result, BadOrigin> { if T::ManagerOrigin::ensure_origin(origin.clone()).is_ok() { - return Ok(None) + return Ok(None); } let who = ensure_signed(origin)?; Ok(Some(who)) @@ -230,12 +230,15 @@ impl Pallet { // We take a deposit only if there is a provided depositor and the preimage was not // previously requested. This also allows the tx to pay no fee. let status = match (StatusFor::::get(hash), maybe_depositor) { - (Some(RequestStatus::Requested { count, deposit, .. }), _) => - RequestStatus::Requested { count, deposit, len: Some(len) }, - (Some(RequestStatus::Unrequested { .. }), Some(_)) => - return Err(Error::::AlreadyNoted.into()), - (Some(RequestStatus::Unrequested { len, deposit }), None) => - RequestStatus::Requested { deposit: Some(deposit), count: 1, len: Some(len) }, + (Some(RequestStatus::Requested { count, deposit, .. }), _) => { + RequestStatus::Requested { count, deposit, len: Some(len) } + }, + (Some(RequestStatus::Unrequested { .. 
}), Some(_)) => { + return Err(Error::::AlreadyNoted.into()) + }, + (Some(RequestStatus::Unrequested { len, deposit }), None) => { + RequestStatus::Requested { deposit: Some(deposit), count: 1, len: Some(len) } + }, (None, None) => RequestStatus::Requested { count: 1, len: Some(len), deposit: None }, (None, Some(depositor)) => { let length = preimage.len() as u32; diff --git a/frame/preimage/src/migration.rs b/frame/preimage/src/migration.rs index a5d15c23c758a..2acfdb54718d1 100644 --- a/frame/preimage/src/migration.rs +++ b/frame/preimage/src/migration.rs @@ -94,7 +94,7 @@ pub mod v1 { "skipping MovePreimagesIntoBuckets: executed on wrong storage version.\ Expected version 0" ); - return weight + return weight; } let status = v0::StatusFor::::drain().collect::>(); @@ -108,7 +108,7 @@ pub mod v1 { preimage } else { log::error!(target: TARGET, "preimage not found for hash {:?}", &hash); - continue + continue; }; let len = preimage.len() as u32; if len > MAX_SIZE { @@ -118,22 +118,24 @@ pub mod v1 { &hash, len ); - continue + continue; } let status = match status { v0::RequestStatus::Unrequested(deposit) => match deposit { Some(deposit) => RequestStatus::Unrequested { deposit, len }, // `None` depositor becomes system-requested. 
- None => - RequestStatus::Requested { deposit: None, count: 1, len: Some(len) }, + None => { + RequestStatus::Requested { deposit: None, count: 1, len: Some(len) } + }, }, v0::RequestStatus::Requested(count) if count == 0 => { log::error!(target: TARGET, "preimage has counter of zero: {:?}", hash); - continue + continue; + }, + v0::RequestStatus::Requested(count) => { + RequestStatus::Requested { deposit: None, count, len: Some(len) } }, - v0::RequestStatus::Requested(count) => - RequestStatus::Requested { deposit: None, count, len: Some(len) }, }; log::trace!(target: TARGET, "Moving preimage {:?} with len {}", hash, len); diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 5c07a2b012243..e5c7c7fdc3be8 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -499,9 +499,9 @@ pub mod pallet { let call_hash = T::CallHasher::hash_of(&call); let now = system::Pallet::::block_number(); Self::edit_announcements(&delegate, |ann| { - ann.real != real || - ann.call_hash != call_hash || - now.saturating_sub(ann.height) < def.delay + ann.real != real + || ann.call_hash != call_hash + || now.saturating_sub(ann.height) < def.delay }) .map_err(|_| Error::::Unannounced)?; @@ -758,8 +758,8 @@ impl Pallet { force_proxy_type: Option, ) -> Result, DispatchError> { let f = |x: &ProxyDefinition| -> bool { - &x.delegate == delegate && - force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y) + &x.delegate == delegate + && force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y) }; Ok(Proxies::::get(real).0.into_iter().find(f).ok_or(Error::::NotProxy)?) } @@ -777,15 +777,19 @@ impl Pallet { match c.is_sub_type() { // Proxy call cannot add or remove a proxy with more permissions than it already // has. - Some(Call::add_proxy { ref proxy_type, .. }) | - Some(Call::remove_proxy { ref proxy_type, .. }) + Some(Call::add_proxy { ref proxy_type, .. }) + | Some(Call::remove_proxy { ref proxy_type, .. 
}) if !def.proxy_type.is_superset(proxy_type) => - false, + { + false + }, // Proxy call cannot remove all proxies or kill pure proxies unless it has full // permissions. Some(Call::remove_proxies { .. }) | Some(Call::kill_pure { .. }) if def.proxy_type != T::ProxyType::default() => - false, + { + false + }, _ => def.proxy_type.filter(c), } }); diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index 33aed2704918c..2bf7f1948fb68 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -577,8 +577,9 @@ pub mod pallet { poll, |mut status| -> Result<(TallyOf, VoteRecord), DispatchError> { match status { - PollStatus::None | PollStatus::Completed(..) => - Err(Error::::NotPolling)?, + PollStatus::None | PollStatus::Completed(..) => { + Err(Error::::NotPolling)? + }, PollStatus::Ongoing(ref mut tally, class) => { match Voting::::get(&poll, &who) { Some(Aye(votes)) => { @@ -634,7 +635,7 @@ pub mod pallet { ); if r.unique == 0 { // return Err(Error::::NoneRemaining) - return Ok(Pays::Yes.into()) + return Ok(Pays::Yes.into()); } if let Some(cursor) = r.maybe_cursor { VotingCleanup::::insert(poll_index, BoundedVec::truncate_from(cursor)); diff --git a/frame/ranked-collective/src/tests.rs b/frame/ranked-collective/src/tests.rs index 68bb79f3d07f7..5596f44f594d0 100644 --- a/frame/ranked-collective/src/tests.rs +++ b/frame/ranked-collective/src/tests.rs @@ -120,8 +120,9 @@ impl Polling> for TestPolls { let mut polls = Polls::get(); let entry = polls.get_mut(&index); let r = match entry { - Some(Ongoing(ref mut tally_mut_ref, class)) => - f(PollStatus::Ongoing(tally_mut_ref, *class)), + Some(Ongoing(ref mut tally_mut_ref, class)) => { + f(PollStatus::Ongoing(tally_mut_ref, *class)) + }, Some(Completed(when, succeeded)) => f(PollStatus::Completed(*when, *succeeded)), None => f(PollStatus::None), }; @@ -137,8 +138,9 @@ impl Polling> for TestPolls { let mut polls = Polls::get(); let entry = polls.get_mut(&index); 
let r = match entry { - Some(Ongoing(ref mut tally_mut_ref, class)) => - f(PollStatus::Ongoing(tally_mut_ref, *class)), + Some(Ongoing(ref mut tally_mut_ref, class)) => { + f(PollStatus::Ongoing(tally_mut_ref, *class)) + }, Some(Completed(when, succeeded)) => f(PollStatus::Completed(*when, *succeeded)), None => f(PollStatus::None), }?; diff --git a/frame/referenda/src/branch.rs b/frame/referenda/src/branch.rs index d3744979fc547..67527458b047b 100644 --- a/frame/referenda/src/branch.rs +++ b/frame/referenda/src/branch.rs @@ -113,17 +113,17 @@ impl ServiceBranch { NotQueued => T::WeightInfo::place_decision_deposit_not_queued(), BeginDecidingPassing => T::WeightInfo::place_decision_deposit_passing(), BeginDecidingFailing => T::WeightInfo::place_decision_deposit_failing(), - BeginConfirming | - ContinueConfirming | - EndConfirming | - ContinueNotConfirming | - Approved | - Rejected | - RequeuedInsertion | - RequeuedSlide | - TimedOut | - Fail | - NoDeposit => return None, + BeginConfirming + | ContinueConfirming + | EndConfirming + | ContinueNotConfirming + | Approved + | Rejected + | RequeuedInsertion + | RequeuedSlide + | TimedOut + | Fail + | NoDeposit => return None, }; Some(ref_time_weight) diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index ba5f4aec956b1..40f2c33c7bdd3 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -755,8 +755,8 @@ impl, I: 'static> Pallet { when: T::BlockNumber, ) -> Option<(T::BlockNumber, ScheduleAddressOf)> { let alarm_interval = T::AlarmInterval::get().max(One::one()); - let when = when.saturating_add(alarm_interval).saturating_sub(One::one()) / - (alarm_interval.saturating_mul(alarm_interval)).max(One::one()); + let when = when.saturating_add(alarm_interval).saturating_sub(One::one()) + / (alarm_interval.saturating_mul(alarm_interval)).max(One::one()); let maybe_result = T::Scheduler::schedule( DispatchTime::At(when), None, @@ -872,7 +872,7 @@ impl, I: 'static> Pallet { Ok(c) => c, Err(_) 
=> { debug_assert!(false, "Unable to create a bounded call from `one_fewer_deciding`??",); - return + return; }, }; let maybe_result = T::Scheduler::schedule( @@ -911,7 +911,7 @@ impl, I: 'static> Pallet { false, "Unable to create a bounded call from `nudge_referendum`??", ); - return false + return false; }, }; status.alarm = Self::set_alarm(call, alarm); @@ -1012,7 +1012,7 @@ impl, I: 'static> Pallet { ), true, ServiceBranch::TimedOut, - ) + ); } }, Some(deciding) => { @@ -1044,7 +1044,7 @@ impl, I: 'static> Pallet { ), true, ServiceBranch::Approved, - ) + ); }, Some(_) => ServiceBranch::ContinueConfirming, None => { @@ -1069,7 +1069,7 @@ impl, I: 'static> Pallet { ), true, ServiceBranch::Rejected, - ) + ); } if deciding.confirming.is_some() { // Stop confirming @@ -1159,7 +1159,7 @@ impl, I: 'static> Pallet { id: TrackIdOf, ) -> bool { let x = Perbill::from_rational(elapsed.min(period), period); - support_needed.passing(x, tally.support(id)) && - approval_needed.passing(x, tally.approval(id)) + support_needed.passing(x, tally.support(id)) + && approval_needed.passing(x, tally.approval(id)) } } diff --git a/frame/referenda/src/types.rs b/frame/referenda/src/types.rs index a97faca3bbfc2..dc2aa5fb8fd53 100644 --- a/frame/referenda/src/types.rs +++ b/frame/referenda/src/types.rs @@ -251,8 +251,9 @@ impl< Ongoing(x) if x.decision_deposit.is_none() => Ok(None), // Cannot refund deposit if Ongoing as this breaks assumptions. Ongoing(_) => Err(()), - Approved(_, _, d) | Rejected(_, _, d) | TimedOut(_, _, d) | Cancelled(_, _, d) => - Ok(d.take()), + Approved(_, _, d) | Rejected(_, _, d) | TimedOut(_, _, d) | Cancelled(_, _, d) => { + Ok(d.take()) + }, Killed(_) => Ok(None), } } @@ -411,10 +412,12 @@ impl Curve { /// Determine the `y` value for the given `x` value. 
pub(crate) fn threshold(&self, x: Perbill) -> Perbill { match self { - Self::LinearDecreasing { length, floor, ceil } => - *ceil - (x.min(*length).saturating_div(*length, Down) * (*ceil - *floor)), - Self::SteppedDecreasing { begin, end, step, period } => - (*begin - (step.int_mul(x.int_div(*period))).min(*begin)).max(*end), + Self::LinearDecreasing { length, floor, ceil } => { + *ceil - (x.min(*length).saturating_div(*length, Down) * (*ceil - *floor)) + }, + Self::SteppedDecreasing { begin, end, step, period } => { + (*begin - (step.int_mul(x.int_div(*period))).min(*begin)).max(*end) + }, Self::Reciprocal { factor, x_offset, y_offset } => factor .checked_rounding_div(FixedI64::from(x) + *x_offset, Low) .map(|yp| (yp + *y_offset).into_clamped_perthing()) @@ -453,20 +456,22 @@ impl Curve { /// ``` pub fn delay(&self, y: Perbill) -> Perbill { match self { - Self::LinearDecreasing { length, floor, ceil } => + Self::LinearDecreasing { length, floor, ceil } => { if y < *floor { Perbill::one() } else if y > *ceil { Perbill::zero() } else { (*ceil - y).saturating_div(*ceil - *floor, Up).saturating_mul(*length) - }, - Self::SteppedDecreasing { begin, end, step, period } => + } + }, + Self::SteppedDecreasing { begin, end, step, period } => { if y < *end { Perbill::one() } else { period.int_mul((*begin - y.min(*begin) + step.less_epsilon()).int_div(*step)) - }, + } + }, Self::Reciprocal { factor, x_offset, y_offset } => { let y = FixedI64::from(y); let maybe_term = factor.checked_rounding_div(y - *y_offset, High); diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index e621c913b2386..27fffe2f134ec 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -97,11 +97,11 @@ fn make_call(maybe_lookup_len: Option) -> Bounded<: Some(x) => x, None => { len -= 1; - continue + continue; }, }; if c.lookup_needed() == maybe_lookup_len.is_some() { - break c + break c; } if maybe_lookup_len.is_some() { len += 1; @@ 
-109,7 +109,7 @@ fn make_call(maybe_lookup_len: Option) -> Bounded<: if len > 0 { len -= 1; } else { - break c + break c; } } } diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 78533540be98f..965080b36bca2 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -602,7 +602,7 @@ impl> Pallet { &h, &err ); - return None + return None; } weight.saturating_accrue(T::DbWeight::get().reads(1)); log::info!("Migrated call by hash, hash: {:?}", h); @@ -704,7 +704,7 @@ impl Pallet { }; if when <= now { - return Err(Error::::TargetBlockNumberInPast.into()) + return Err(Error::::TargetBlockNumberInPast.into()); } Ok(when) @@ -738,7 +738,7 @@ impl Pallet { agenda[hole_index] = Some(what); hole_index as u32 } else { - return Err((DispatchError::Exhausted, what)) + return Err((DispatchError::Exhausted, what)); } }; Agenda::::insert(when, agenda); @@ -783,7 +783,7 @@ impl Pallet { T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), Some(Ordering::Less) | None ) { - return Err(BadOrigin.into()) + return Err(BadOrigin.into()); } }; Ok(s.take()) @@ -798,7 +798,7 @@ impl Pallet { Self::deposit_event(Event::Canceled { when, index }); Ok(()) } else { - return Err(Error::::NotFound.into()) + return Err(Error::::NotFound.into()); } } @@ -809,7 +809,7 @@ impl Pallet { let new_time = Self::resolve_time(new_time)?; if new_time == when { - return Err(Error::::RescheduleNoChange.into()) + return Err(Error::::RescheduleNoChange.into()); } let task = Agenda::::try_mutate(when, |agenda| { @@ -832,7 +832,7 @@ impl Pallet { ) -> Result, DispatchError> { // ensure id it is unique if Lookup::::contains_key(&id) { - return Err(Error::::FailedToSchedule.into()) + return Err(Error::::FailedToSchedule.into()); } let when = Self::resolve_time(when)?; @@ -865,7 +865,7 @@ impl Pallet { T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), Some(Ordering::Less) | None ) { - return Err(BadOrigin.into()) + return Err(BadOrigin.into()); } T::Preimages::drop(&s.call); } 
@@ -876,7 +876,7 @@ impl Pallet { Self::deposit_event(Event::Canceled { when, index }); Ok(()) } else { - return Err(Error::::NotFound.into()) + return Err(Error::::NotFound.into()); } }) } @@ -891,7 +891,7 @@ impl Pallet { let (when, index) = lookup.ok_or(Error::::NotFound)?; if new_time == when { - return Err(Error::::RescheduleNoChange.into()) + return Err(Error::::RescheduleNoChange.into()); } let task = Agenda::::try_mutate(when, |agenda| { @@ -915,7 +915,7 @@ impl Pallet { /// Service up to `max` agendas queue starting from earliest incompletely executed agenda. fn service_agendas(weight: &mut WeightMeter, now: T::BlockNumber, max: u32) { if !weight.check_accrue(T::WeightInfo::service_agendas_base()) { - return + return; } let mut incomplete_since = now + One::one(); @@ -977,7 +977,7 @@ impl Pallet { ); if !weight.can_accrue(base_weight) { postponed += 1; - break + break; } let result = Self::service_task(weight, now, when, agenda_index, *executed == 0, task); agenda[agenda_index as usize] = match result { @@ -1103,14 +1103,15 @@ impl Pallet { let max_weight = base_weight.saturating_add(call_weight); if !weight.can_accrue(max_weight) { - return Err(Overweight) + return Err(Overweight); } let dispatch_origin = origin.into(); let (maybe_actual_call_weight, result) = match call.dispatch(dispatch_origin) { Ok(post_info) => (post_info.actual_weight, Ok(())), - Err(error_and_info) => - (error_and_info.post_info.actual_weight, Err(error_and_info.error)), + Err(error_and_info) => { + (error_and_info.post_info.actual_weight, Err(error_and_info.error)) + }, }; let call_weight = maybe_actual_call_weight.unwrap_or(call_weight); weight.check_accrue(base_weight); diff --git a/frame/scheduler/src/migration.rs b/frame/scheduler/src/migration.rs index 6769d20023196..d38eaa367c45f 100644 --- a/frame/scheduler/src/migration.rs +++ b/frame/scheduler/src/migration.rs @@ -125,7 +125,7 @@ pub mod v3 { agenda.len(), max_scheduled_per_block, ); - return Err("Agenda would overflow 
`MaxScheduledPerBlock`.") + return Err("Agenda would overflow `MaxScheduledPerBlock`."); } } // Check that bounding the calls will not overflow `MAX_LENGTH`. @@ -142,7 +142,7 @@ pub mod v3 { block_number, l, ); - return Err("Call is too large.") + return Err("Call is too large."); } }, _ => (), @@ -162,7 +162,7 @@ pub mod v3 { Expected version 3, found {:?}", version, ); - return T::DbWeight::get().reads(1) + return T::DbWeight::get().reads(1); } crate::Pallet::::migrate_v3_to_v4() diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index 033d787946709..92a77011d3ae1 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -627,11 +627,11 @@ fn on_initialize_weight_is_correct() { // Will include the named periodic only assert_eq!( Scheduler::on_initialize(1), - TestWeightInfo::service_agendas_base() + - TestWeightInfo::service_agenda_base(1) + - ::service_task(None, true, true) + - TestWeightInfo::execute_dispatch_unsigned() + - call_weight + Weight::from_ref_time(4) + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(1) + + ::service_task(None, true, true) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(4) ); assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32)]); @@ -639,14 +639,14 @@ fn on_initialize_weight_is_correct() { // Will include anon and anon periodic assert_eq!( Scheduler::on_initialize(2), - TestWeightInfo::service_agendas_base() + - TestWeightInfo::service_agenda_base(2) + - ::service_task(None, false, true) + - TestWeightInfo::execute_dispatch_unsigned() + - call_weight + Weight::from_ref_time(3) + - ::service_task(None, false, false) + - TestWeightInfo::execute_dispatch_unsigned() + - call_weight + Weight::from_ref_time(2) + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(2) + + ::service_task(None, false, true) + + TestWeightInfo::execute_dispatch_unsigned() + + 
call_weight + Weight::from_ref_time(3) + + ::service_task(None, false, false) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(2) ); assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); @@ -654,11 +654,11 @@ fn on_initialize_weight_is_correct() { // Will include named only assert_eq!( Scheduler::on_initialize(3), - TestWeightInfo::service_agendas_base() + - TestWeightInfo::service_agenda_base(1) + - ::service_task(None, true, false) + - TestWeightInfo::execute_dispatch_unsigned() + - call_weight + Weight::from_ref_time(1) + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(1) + + ::service_task(None, true, false) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(1) ); assert_eq!(IncompleteSince::::get(), None); assert_eq!( diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index a015c1c568153..424f9952d2d92 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -455,10 +455,12 @@ impl, I: 'static> Pallet { >::put(&new_members_bounded); match notify { - ChangeReceiver::MembershipInitialized => - T::MembershipInitialized::initialize_members(&new_members_bounded), - ChangeReceiver::MembershipChanged => - T::MembershipChanged::set_members_sorted(&new_members_bounded[..], &old_members[..]), + ChangeReceiver::MembershipInitialized => { + T::MembershipInitialized::initialize_members(&new_members_bounded) + }, + ChangeReceiver::MembershipChanged => { + T::MembershipChanged::set_members_sorted(&new_members_bounded[..], &old_members[..]) + }, } } diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 45b4ba3c0a799..153dc28b8b2ba 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -106,7 +106,7 @@ impl Pallet { let up_to = sp_std::cmp::min(up_to, end); if up_to < start { 
- return // out of bounds. harmless. + return; // out of bounds. harmless. } (start..up_to).for_each(::HistoricalSessions::remove); @@ -352,7 +352,7 @@ impl> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet>::validators().len() as ValidatorCount; if count != proof.validator_count { - return None + return None; } Some((owner, id)) @@ -362,7 +362,7 @@ impl> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet>::get(&proof.session)?; if count != proof.validator_count { - return None + return None; } let trie = ProvingTrie::::from_nodes(root, &proof.trie_nodes); diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 7b97a20860175..a882528ca4fff 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -681,7 +681,7 @@ impl Pallet { let mut now_session_keys = session_keys.iter(); let mut check_next_changed = |keys: &T::Keys| { if changed { - return + return; } // since a new validator set always leads to `changed` starting // as true, we can ensure that `now_session_keys` and `next_validators` @@ -717,14 +717,14 @@ impl Pallet { /// Disable the validator of index `i`, returns `false` if the validator was already disabled. 
pub fn disable_index(i: u32) -> bool { if i >= Validators::::decode_len().unwrap_or(0) as u32 { - return false + return false; } >::mutate(|disabled| { if let Err(index) = disabled.binary_search(&i) { disabled.insert(index, i); T::SessionHandler::on_disabled(i); - return true + return true; } false @@ -839,7 +839,7 @@ impl Pallet { if let Some(old) = old_keys.as_ref().map(|k| k.get_raw(*id)) { if key == old { - continue + continue; } Self::clear_key_owner(*id, old); diff --git a/frame/session/src/migrations/v1.rs b/frame/session/src/migrations/v1.rs index c0dce422fe8b5..d58d9daef418b 100644 --- a/frame/session/src/migrations/v1.rs +++ b/frame/session/src/migrations/v1.rs @@ -47,7 +47,7 @@ pub fn migrate::on_chain_storage_version(); @@ -104,7 +104,7 @@ pub fn pre_migrate< log_migration("pre-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name); if new_pallet_name == OLD_PREFIX { - return + return; } let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); @@ -145,7 +145,7 @@ pub fn post_migrate< log_migration("post-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name); if new_pallet_name == OLD_PREFIX { - return + return; } // Assert that no `HistoricalSessions` and `StoredRange` storages remains at the old prefix. 
diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index aa13eacba9564..c7490276c2768 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -122,8 +122,8 @@ pub struct TestShouldEndSession; impl ShouldEndSession for TestShouldEndSession { fn should_end_session(now: u64) -> bool { let l = SessionLength::get(); - now % l == 0 || - ForceSessionEnd::mutate(|l| { + now % l == 0 + || ForceSessionEnd::mutate(|l| { let r = *l; *l = false; r diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 73a09490ea579..1d37dd2daaeff 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -1001,7 +1001,7 @@ pub mod pallet { } else { >::insert(&who, payouts); } - return Ok(()) + return Ok(()); } } Err(Error::::NoPayout.into()) @@ -1202,8 +1202,8 @@ pub mod pallet { // Reduce next pot by payout >::put(pot - value); // Add payout for new candidate - let maturity = >::block_number() + - Self::lock_duration(Self::members().len() as u32); + let maturity = >::block_number() + + Self::lock_duration(Self::members().len() as u32); Self::pay_accepted_candidate(&who, value, kind, maturity); }, Judgement::Reject => { @@ -1236,7 +1236,7 @@ pub mod pallet { // Remove suspended candidate >::remove(who); } else { - return Err(Error::::NotSuspended.into()) + return Err(Error::::NotSuspended.into()); } Ok(()) } @@ -1433,8 +1433,8 @@ impl, I: 'static> Pallet { // out of society. members.reserve(candidates.len()); - let maturity = >::block_number() + - Self::lock_duration(members.len() as u32); + let maturity = >::block_number() + + Self::lock_duration(members.len() as u32); let mut rewardees = Vec::new(); let mut total_approvals = 0; @@ -1621,7 +1621,7 @@ impl, I: 'static> Pallet { // whole slash is accounted for. 
*amount -= rest; rest = Zero::zero(); - break + break; } } >::insert(who, &payouts[dropped..]); @@ -1793,7 +1793,7 @@ impl, I: 'static> Pallet { selected.push(bid.clone()); zero_selected = true; count += 1; - return false + return false; } } else { total_cost += bid.value; @@ -1801,7 +1801,7 @@ impl, I: 'static> Pallet { if total_cost <= pot { selected.push(bid.clone()); count += 1; - return false + return false; } } } diff --git a/frame/staking/reward-curve/src/lib.rs b/frame/staking/reward-curve/src/lib.rs index e1ea8aa7b15d5..f1d327a2338d7 100644 --- a/frame/staking/reward-curve/src/lib.rs +++ b/frame/staking/reward-curve/src/lib.rs @@ -135,10 +135,10 @@ struct Bounds { impl Bounds { fn check(&self, value: u32) -> bool { - let wrong = (self.min_strict && value <= self.min) || - (!self.min_strict && value < self.min) || - (self.max_strict && value >= self.max) || - (!self.max_strict && value > self.max); + let wrong = (self.min_strict && value <= self.min) + || (!self.min_strict && value < self.min) + || (self.max_strict && value >= self.max) + || (!self.max_strict && value > self.max); !wrong } @@ -174,7 +174,7 @@ fn parse_field( value, bounds, ), - )) + )); } Ok(value) @@ -195,7 +195,7 @@ impl Parse for INposInput { ::parse(input)?; if !input.is_empty() { - return Err(input.error("expected end of input stream, no token expected")) + return Err(input.error("expected end of input stream, no token expected")); } let min_inflation = parse_field::( @@ -230,7 +230,7 @@ impl Parse for INposInput { >::parse(&args_input)?; if !args_input.is_empty() { - return Err(args_input.error("expected end of input stream, no token expected")) + return Err(args_input.error("expected end of input stream, no token expected")); } Ok(Self { @@ -272,7 +272,7 @@ impl INPoS { // See web3 docs for the details fn compute_opposite_after_x_ideal(&self, y: u32) -> u32 { if y == self.i_0 { - return u32::MAX + return u32::MAX; } // Note: the log term calculated here represents a per_million value 
let log = log2(self.i_ideal_times_x_ideal - self.i_0, y - self.i_0); @@ -290,8 +290,8 @@ fn compute_points(input: &INposInput) -> Vec<(u32, u32)> { // For each point p: (next_p.0 - p.0) < segment_length && (next_p.1 - p.1) < segment_length. // This ensures that the total number of segment doesn't overflow max_piece_count. - let max_length = (input.max_inflation - input.min_inflation + 1_000_000 - inpos.x_ideal) / - (input.max_piece_count - 1); + let max_length = (input.max_inflation - input.min_inflation + 1_000_000 - inpos.x_ideal) + / (input.max_piece_count - 1); let mut delta_y = max_length; let mut y = input.max_inflation; @@ -303,29 +303,29 @@ fn compute_points(input: &INposInput) -> Vec<(u32, u32)> { if next_y <= input.min_inflation { delta_y = delta_y.saturating_sub(1); - continue + continue; } let next_x = inpos.compute_opposite_after_x_ideal(next_y); if (next_x - points.last().unwrap().0) > max_length { delta_y = delta_y.saturating_sub(1); - continue + continue; } if next_x >= 1_000_000 { let prev = points.last().unwrap(); // Compute the y corresponding to x=1_000_000 using the this point and the previous one. 
- let delta_y: u32 = ((next_x - 1_000_000) as u64 * (prev.1 - next_y) as u64 / - (next_x - prev.0) as u64) + let delta_y: u32 = ((next_x - 1_000_000) as u64 * (prev.1 - next_y) as u64 + / (next_x - prev.0) as u64) .try_into() .unwrap(); let y = next_y + delta_y; points.push((1_000_000, y)); - return points + return points; } points.push((next_x, next_y)); y = next_y; diff --git a/frame/staking/reward-curve/src/log.rs b/frame/staking/reward-curve/src/log.rs index 248a1e3c36a6e..6c8c662755b69 100644 --- a/frame/staking/reward-curve/src/log.rs +++ b/frame/staking/reward-curve/src/log.rs @@ -39,7 +39,7 @@ pub fn log2(p: u32, q: u32) -> u32 { // log2(1) = 0 if p == q { - return 0 + return 0; } // find the power of 2 where q * 2^n <= p < q * 2^(n+1) @@ -59,7 +59,7 @@ pub fn log2(p: u32, q: u32) -> u32 { loop { let term = taylor_term(k, y_num.into(), y_den.into()); if term == 0 { - break + break; } res += term; diff --git a/frame/staking/reward-fn/src/lib.rs b/frame/staking/reward-fn/src/lib.rs index cc9919c28cce3..3c672eec0adce 100644 --- a/frame/staking/reward-fn/src/lib.rs +++ b/frame/staking/reward-fn/src/lib.rs @@ -54,12 +54,12 @@ use sp_arithmetic::{ pub fn compute_inflation(stake: P, ideal_stake: P, falloff: P) -> P { if stake < ideal_stake { // ideal_stake is more than 0 because it is strictly more than stake - return stake / ideal_stake + return stake / ideal_stake; } if falloff < P::from_percent(1.into()) { log::error!("Invalid inflation computation: falloff less than 1% is not supported"); - return PerThing::zero() + return PerThing::zero(); } let accuracy = { @@ -130,7 +130,7 @@ fn compute_taylor_serie_part(p: &INPoSParam) -> BigUint { last_taylor_term = compute_taylor_term(k, &last_taylor_term, p); if last_taylor_term.is_zero() { - break + break; } let last_taylor_term_positive = k % 2 == 0; @@ -153,7 +153,7 @@ fn compute_taylor_serie_part(p: &INPoSParam) -> BigUint { } if !taylor_sum_positive { - return BigUint::zero() + return BigUint::zero(); } 
taylor_sum.lstrip(); @@ -195,15 +195,15 @@ fn div_by_stripped(mut a: BigUint, b: &BigUint) -> BigUint { if b.len() == 0 { log::error!("Computation error: Invalid division"); - return BigUint::zero() + return BigUint::zero(); } if b.len() == 1 { - return a.div_unit(b.checked_get(0).unwrap_or(1)) + return a.div_unit(b.checked_get(0).unwrap_or(1)); } if b.len() > a.len() { - return BigUint::zero() + return BigUint::zero(); } if b.len() == a.len() { @@ -217,7 +217,7 @@ fn div_by_stripped(mut a: BigUint, b: &BigUint) -> BigUint { .map(|res| res.0) .unwrap_or_else(BigUint::zero) .div_unit(100_000) - .div_unit(100_000) + .div_unit(100_000); } a.div(b, false).map(|res| res.0).unwrap_or_else(BigUint::zero) diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index dcb861e2ce419..b3fcb72a73d77 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -51,7 +51,7 @@ type MaxNominators = <::BenchmarkingConfig as BenchmarkingConfig // read and write operations. 
pub fn add_slashing_spans(who: &T::AccountId, spans: u32) { if spans == 0 { - return + return; } // For the first slashing span, we initialize diff --git a/frame/staking/src/inflation.rs b/frame/staking/src/inflation.rs index c7519683c75d1..4f9d4f2501d82 100644 --- a/frame/staking/src/inflation.rs +++ b/frame/staking/src/inflation.rs @@ -42,8 +42,8 @@ where const MILLISECONDS_PER_YEAR: u64 = 1000 * 3600 * 24 * 36525 / 100; let portion = Perbill::from_rational(era_duration as u64, MILLISECONDS_PER_YEAR); - let payout = portion * - yearly_inflation + let payout = portion + * yearly_inflation .calculate_for_fraction_times_denominator(npos_token_staked, total_tokens.clone()); let maximum = portion * (yearly_inflation.maximum * total_tokens); (payout, maximum) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 0f5b8e0123ab6..baaf82b023e7c 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -532,7 +532,7 @@ impl StakingLedger { } if unlocking_balance >= value { - break + break; } } @@ -569,7 +569,7 @@ impl StakingLedger { slash_era: EraIndex, ) -> BalanceOf { if slash_amount.is_zero() { - return Zero::zero() + return Zero::zero(); } use sp_runtime::PerThing as _; @@ -663,7 +663,7 @@ impl StakingLedger { let mut slashed_unlocking = BTreeMap::<_, _>::new(); for i in slash_chunks_priority { if remaining_slash.is_zero() { - break + break; } if let Some(chunk) = self.unlocking.get_mut(i).defensive() { @@ -671,7 +671,7 @@ impl StakingLedger { // write the new slashed value of this chunk to the map. 
slashed_unlocking.insert(chunk.era, chunk.value); } else { - break + break; } } diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index f2ccb4f8b096f..3031900629a1b 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -125,7 +125,9 @@ pub mod v11 { warn, "new bags-list name is equal to the old one, only bumping the version" ); - return T::DbWeight::get().reads(1).saturating_add(T::DbWeight::get().writes(1)) + return T::DbWeight::get() + .reads(1) + .saturating_add(T::DbWeight::get().writes(1)); } move_pallet(old_pallet_name.as_bytes(), new_pallet_name.as_bytes()); @@ -148,7 +150,7 @@ pub mod v11 { // skip storage prefix checks for the same pallet names if new_pallet_name == old_pallet_name { - return Ok(()) + return Ok(()); } let old_pallet_prefix = twox_128(N::get().as_bytes()); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 16e4e5ddd7aa2..ad230914693d5 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -712,9 +712,9 @@ pub(crate) fn on_offence_in_era( for &(bonded_era, start_session) in bonded_eras.iter() { if bonded_era == era { let _ = Staking::on_offence(offenders, slash_fraction, start_session, disable_strategy); - return + return; } else if bonded_era > era { - break + break; } } diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 9be01dd823104..903290d5faf13 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -125,9 +125,10 @@ impl Pallet { .retain(|&x| x >= current_era.saturating_sub(history_depth)); match ledger.claimed_rewards.binary_search(&era) { - Ok(_) => + Ok(_) => { return Err(Error::::AlreadyClaimed - .with_weight(T::WeightInfo::payout_stakers_alive_staked(0))), + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0))) + }, Err(pos) => ledger .claimed_rewards .try_insert(pos, era) @@ -160,7 +161,7 @@ impl Pallet { // Nothing to do if they have no reward 
points. if validator_reward_points.is_zero() { - return Ok(Some(T::WeightInfo::payout_stakers_alive_staked(0)).into()) + return Ok(Some(T::WeightInfo::payout_stakers_alive_staked(0)).into()); } // This is the fraction of the total reward that the validator and the @@ -260,8 +261,9 @@ impl Pallet { Self::update_ledger(&controller, &l); r }), - RewardDestination::Account(dest_account) => - Some(T::Currency::deposit_creating(&dest_account, amount)), + RewardDestination::Account(dest_account) => { + Some(T::Currency::deposit_creating(&dest_account, amount)) + }, RewardDestination::None => None, } } @@ -291,14 +293,14 @@ impl Pallet { _ => { // Either `Forcing::ForceNone`, // or `Forcing::NotForcing if era_length >= T::SessionsPerEra::get()`. - return None + return None; }, } // New era. let maybe_new_era_validators = Self::try_trigger_new_era(session_index, is_genesis); - if maybe_new_era_validators.is_some() && - matches!(ForceEra::::get(), Forcing::ForceNew) + if maybe_new_era_validators.is_some() + && matches!(ForceEra::::get(), Forcing::ForceNew) { ForceEra::::put(Forcing::NotForcing); } @@ -504,7 +506,7 @@ impl Pallet { } Self::deposit_event(Event::StakingElectionFailed); - return None + return None; } Self::deposit_event(Event::StakersElected); @@ -726,8 +728,8 @@ impl Pallet { let mut nominators_taken = 0u32; let mut sorted_voters = T::VoterList::iter(); - while all_voters.len() < max_allowed_len && - voters_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * max_allowed_len as u32) + while all_voters.len() < max_allowed_len + && voters_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * max_allowed_len as u32) { let voter = match sorted_voters.next() { Some(voter) => { @@ -804,8 +806,8 @@ impl Pallet { let mut targets_seen = 0; let mut targets_iter = T::TargetList::iter(); - while all_targets.len() < max_allowed_len && - targets_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * max_allowed_len as u32) + while all_targets.len() < max_allowed_len + && targets_seen < 
(NPOS_MAX_ITERATIONS_COEFFICIENT * max_allowed_len as u32) { let target = match targets_iter.next() { Some(target) => { @@ -952,7 +954,7 @@ impl ElectionDataProvider for Pallet { // We can't handle this case yet -- return an error. if maybe_max_len.map_or(false, |max_len| target_count > max_len as u32) { - return Err("Target snapshot too big") + return Err("Target snapshot too big"); } Ok(Self::get_npos_targets(None)) @@ -1230,7 +1232,7 @@ where add_db_reads_writes(1, 0); if active_era.is_none() { // This offence need not be re-submitted. - return consumed_weight + return consumed_weight; } active_era.expect("value checked not to be `None`; qed").index }; @@ -1271,7 +1273,7 @@ where // Skip if the validator is invulnerable. if invulnerables.contains(stash) { - continue + continue; } let unapplied = slashing::compute_slash::(slashing::SlashParams { @@ -1645,8 +1647,8 @@ impl Pallet { fn check_count() -> Result<(), &'static str> { ensure!( - ::VoterList::count() == - Nominators::::count() + Validators::::count(), + ::VoterList::count() + == Nominators::::count() + Validators::::count(), "wrong external count" ); @@ -1670,9 +1672,10 @@ impl Pallet { ErasStakers::::iter_prefix_values(era) .map(|expo| { ensure!( - expo.total == - expo.own + - expo.others + expo.total + == expo.own + + expo + .others .iter() .map(|e| e.value) .fold(Zero::zero(), |acc, x| acc + x), diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 8fddba2150370..7b4fb122ea40e 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -649,8 +649,8 @@ pub mod pallet { _ => Ok(()), }); assert!( - ValidatorCount::::get() <= - ::MaxWinners::get() + ValidatorCount::::get() + <= ::MaxWinners::get() ); } @@ -790,8 +790,8 @@ pub mod pallet { // ensure election results are always bounded with the same value assert!( - ::MaxWinners::get() == - ::MaxWinners::get() + ::MaxWinners::get() + == ::MaxWinners::get() ); sp_std::if_std! 
{ @@ -841,18 +841,18 @@ pub mod pallet { let stash = ensure_signed(origin)?; if >::contains_key(&stash) { - return Err(Error::::AlreadyBonded.into()) + return Err(Error::::AlreadyBonded.into()); } let controller = T::Lookup::lookup(controller)?; if >::contains_key(&controller) { - return Err(Error::::AlreadyPaired.into()) + return Err(Error::::AlreadyPaired.into()); } // Reject a bond which is considered to be _dust_. if value < T::Currency::minimum_balance() { - return Err(Error::::InsufficientBond.into()) + return Err(Error::::InsufficientBond.into()); } frame_system::Pallet::::inc_consumers(&stash).map_err(|_| Error::::BadState)?; @@ -1044,24 +1044,23 @@ pub mod pallet { ledger = ledger.consolidate_unlocked(current_era) } - let post_info_weight = if ledger.unlocking.is_empty() && - ledger.active < T::Currency::minimum_balance() - { - // This account must have called `unbond()` with some value that caused the active - // portion to fall below existential deposit + will have no more unlocking chunks - // left. We can now safely remove all staking-related information. - Self::kill_stash(&stash, num_slashing_spans)?; - // Remove the lock. - T::Currency::remove_lock(STAKING_ID, &stash); - // This is worst case scenario, so we use the full weight and return None - None - } else { - // This was the consequence of a partial unbond. just update the ledger and move on. - Self::update_ledger(&controller, &ledger); + let post_info_weight = + if ledger.unlocking.is_empty() && ledger.active < T::Currency::minimum_balance() { + // This account must have called `unbond()` with some value that caused the active + // portion to fall below existential deposit + will have no more unlocking chunks + // left. We can now safely remove all staking-related information. + Self::kill_stash(&stash, num_slashing_spans)?; + // Remove the lock. 
+ T::Currency::remove_lock(STAKING_ID, &stash); + // This is worst case scenario, so we use the full weight and return None + None + } else { + // This was the consequence of a partial unbond. just update the ledger and move on. + Self::update_ledger(&controller, &ledger); - // This is only an update, so we use less overall weight. - Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)) - }; + // This is only an update, so we use less overall weight. + Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)) + }; // `old_total` should never be less than the new total because // `consolidate_unlocked` strictly subtracts balance. @@ -1251,7 +1250,7 @@ pub mod pallet { let old_controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; let controller = T::Lookup::lookup(controller)?; if >::contains_key(&controller) { - return Err(Error::::AlreadyPaired.into()) + return Err(Error::::AlreadyPaired.into()); } if controller != old_controller { >::insert(&stash, &controller); @@ -1551,8 +1550,8 @@ pub mod pallet { let _ = ensure_signed(origin)?; let ed = T::Currency::minimum_balance(); - let reapable = T::Currency::total_balance(&stash) < ed || - Self::ledger(Self::bonded(stash.clone()).ok_or(Error::::NotStash)?) + let reapable = T::Currency::total_balance(&stash) < ed + || Self::ledger(Self::bonded(stash.clone()).ok_or(Error::::NotStash)?) 
.map(|l| l.total) .unwrap_or_default() < ed; ensure!(reapable, Error::::FundedTarget); @@ -1706,7 +1705,7 @@ pub mod pallet { if Nominators::::contains_key(&stash) && Nominators::::get(&stash).is_none() { Self::chill_stash(&stash); - return Ok(()) + return Ok(()); } if caller != controller { diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index a1900136d64fd..b2c8e22850006 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -124,7 +124,7 @@ impl SlashingSpans { pub(crate) fn end_span(&mut self, now: EraIndex) -> bool { let next_start = now + 1; if next_start <= self.last_start { - return false + return false; } let last_length = next_start - self.last_start; @@ -236,7 +236,7 @@ pub(crate) fn compute_slash( // kick out the validator even if they won't be slashed, // as long as the misbehavior is from their most recent slashing span. kick_out_if_recent::(params); - return None + return None; } let (prior_slash_p, _era_slash) = @@ -259,7 +259,7 @@ pub(crate) fn compute_slash( // pays out some reward even if the latest report is not max-in-era. // we opt to avoid the nominator lookups and edits and leave more rewards // for more drastic misbehavior. - return None + return None; } // apply slash to validator. @@ -542,7 +542,7 @@ impl<'a, T: 'a + Config> Drop for InspectingSpans<'a, T> { fn drop(&mut self) { // only update on disk if we slashed this account. if !self.dirty { - return + return; } if let Some((start, end)) = self.spans.prune(self.window_start) { @@ -672,7 +672,7 @@ fn pay_reporters( // nobody to pay out to or nothing to pay; // just treat the whole value as slashed. T::Slash::on_unbalanced(slashed_imbalance); - return + return; } // take rewards out of the slashed imbalance. 
diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 6609b9087637d..c3157d948dd11 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -325,9 +325,9 @@ fn rewards_should_work() { assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); assert_eq_error_rate!( Balances::total_balance(&100), - init_balance_100 + - part_for_100_from_10 * total_payout_0 * 2 / 3 + - part_for_100_from_20 * total_payout_0 * 1 / 3, + init_balance_100 + + part_for_100_from_10 * total_payout_0 * 2 / 3 + + part_for_100_from_20 * total_payout_0 * 1 / 3, 2 ); assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); @@ -367,9 +367,9 @@ fn rewards_should_work() { assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); assert_eq_error_rate!( Balances::total_balance(&100), - init_balance_100 + - part_for_100_from_10 * (total_payout_0 * 2 / 3 + total_payout_1) + - part_for_100_from_20 * total_payout_0 * 1 / 3, + init_balance_100 + + part_for_100_from_10 * (total_payout_0 * 2 / 3 + total_payout_1) + + part_for_100_from_20 * total_payout_0 * 1 / 3, 2 ); assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 5255d4f6f3800..463642a5cb7e8 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -244,13 +244,13 @@ pub mod pallet { if limits.item.is_zero() || limits.size.is_zero() { // handle this minor edge case, else we would call `migrate_tick` at least once. log!(warn, "limits are zero. stopping"); - return Ok(()) + return Ok(()); } while !self.exhausted(limits) && !self.finished() { if let Err(e) = self.migrate_tick() { log!(error, "migrate_until_exhaustion failed: {:?}", e); - return Err(e) + return Err(e); } } @@ -327,7 +327,7 @@ pub mod pallet { _ => { // defensive: there must be an ongoing top migration. 
frame_support::defensive!("cannot migrate child key."); - return Ok(()) + return Ok(()); }, }; @@ -369,7 +369,7 @@ pub mod pallet { Progress::Complete => { // defensive: there must be an ongoing top migration. frame_support::defensive!("cannot migrate top key."); - return Ok(()) + return Ok(()); }, }; @@ -621,7 +621,7 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); Self::deposit_event(Event::::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); - return Ok(().into()) + return Ok(().into()); } Self::deposit_event(Event::::Migrated { @@ -1682,7 +1682,7 @@ pub(crate) mod remote_tests { let ((finished, weight), proof) = ext.execute_and_prove(|| { let weight = run_to_block::(now + One::one()).1; if StateTrieMigration::::migration_process().finished() { - return (true, weight) + return (true, weight); } duration += One::one(); now += One::one(); @@ -1709,7 +1709,7 @@ pub(crate) mod remote_tests { ext.commit_all().unwrap(); if finished { - break + break; } } diff --git a/frame/support/procedural/src/clone_no_bound.rs b/frame/support/procedural/src/clone_no_bound.rs index bd2741c0d47ab..eaf18a68ace1f 100644 --- a/frame/support/procedural/src/clone_no_bound.rs +++ b/frame/support/procedural/src/clone_no_bound.rs @@ -92,7 +92,7 @@ pub fn derive_clone_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke }, syn::Data::Union(_) => { let msg = "Union type not supported by `derive(CloneNoBound)`"; - return syn::Error::new(input.span(), msg).to_compile_error().into() + return syn::Error::new(input.span(), msg).to_compile_error().into(); }, }; diff --git a/frame/support/procedural/src/construct_runtime/expand/event.rs b/frame/support/procedural/src/construct_runtime/expand/event.rs index b11fcef1bfd53..9239731878ae2 100644 --- a/frame/support/procedural/src/construct_runtime/expand/event.rs +++ b/frame/support/procedural/src/construct_runtime/expand/event.rs @@ -44,7 +44,7 @@ pub fn expand_outer_event( be 
constructed: pallet `{}` must have generic `Event`", pallet_name, ); - return Err(syn::Error::new(pallet_name.span(), msg)) + return Err(syn::Error::new(pallet_name.span(), msg)); } let part_is_generic = !generics.params.is_empty(); diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index 1551d85ea4c96..a2a115b082c29 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -45,7 +45,7 @@ pub fn expand_outer_origin( be constructed: pallet `{}` must have generic `Origin`", name ); - return Err(syn::Error::new(name.span(), msg)) + return Err(syn::Error::new(name.span(), msg)); } caller_variants.extend(expand_origin_caller_variant( diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 9e22037a6782e..9f61aa4a45dbf 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -169,10 +169,12 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { let definition = syn::parse_macro_input!(input as RuntimeDeclaration); let res = match definition { - RuntimeDeclaration::Implicit(implicit_def) => - construct_runtime_intermediary_expansion(input_copy.into(), implicit_def), - RuntimeDeclaration::Explicit(explicit_decl) => - construct_runtime_final_expansion(explicit_decl), + RuntimeDeclaration::Implicit(implicit_def) => { + construct_runtime_intermediary_expansion(input_copy.into(), implicit_def) + }, + RuntimeDeclaration::Explicit(explicit_decl) => { + construct_runtime_final_expansion(explicit_decl) + }, }; res.unwrap_or_else(|e| e.to_compile_error()).into() @@ -230,7 +232,7 @@ fn construct_runtime_final_expansion( return Err(syn::Error::new( system_pallet.name.span(), "`System` pallet declaration is feature gated, please remove any `#[cfg]` 
attributes", - )) + )); } let features = pallets diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 7a5acf43b92b0..3865cb6befb4f 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -88,19 +88,21 @@ impl Parse for RuntimeDeclaration { let pallets_token = pallets.token; match convert_pallets(pallets.content.inner.into_iter().collect())? { - PalletsConversion::Implicit(pallets) => + PalletsConversion::Implicit(pallets) => { Ok(RuntimeDeclaration::Implicit(ImplicitRuntimeDeclaration { name, where_section, pallets, - })), - PalletsConversion::Explicit(pallets) => + })) + }, + PalletsConversion::Explicit(pallets) => { Ok(RuntimeDeclaration::Explicit(ExplicitRuntimeDeclaration { name, where_section, pallets, pallets_token, - })), + })) + }, } } } @@ -121,9 +123,9 @@ impl Parse for WhereSection { definitions.push(definition); if !input.peek(Token![,]) { if !input.peek(token::Brace) { - return Err(input.error("Expected `,` or `{`")) + return Err(input.error("Expected `,` or `{`")); } - break + break; } input.parse::()?; } @@ -136,7 +138,7 @@ impl Parse for WhereSection { "`{:?}` was declared above. 
Please use exactly one declaration for `{:?}`.", kind, kind ); - return Err(Error::new(*kind_span, msg)) + return Err(Error::new(*kind_span, msg)); } Ok(Self { block, node_block, unchecked_extrinsic }) } @@ -166,7 +168,7 @@ impl Parse for WhereDefinition { } else if lookahead.peek(keyword::UncheckedExtrinsic) { (input.parse::()?.span(), WhereKind::UncheckedExtrinsic) } else { - return Err(lookahead.error()) + return Err(lookahead.error()); }; Ok(Self { @@ -227,12 +229,12 @@ impl Parse for PalletDeclaration { let res = Some(input.parse()?); let _: Token![>] = input.parse()?; res - } else if !(input.peek(Token![::]) && input.peek3(token::Brace)) && - !input.peek(keyword::exclude_parts) && - !input.peek(keyword::use_parts) && - !input.peek(Token![=]) && - !input.peek(Token![,]) && - !input.is_empty() + } else if !(input.peek(Token![::]) && input.peek3(token::Brace)) + && !input.peek(keyword::exclude_parts) + && !input.peek(keyword::use_parts) + && !input.peek(Token![=]) + && !input.peek(Token![,]) + && !input.is_empty() { return Err(input.error( "Unexpected tokens, expected one of `::$ident` `::{`, `exclude_parts`, `use_parts`, `=`, `,`", @@ -245,15 +247,15 @@ impl Parse for PalletDeclaration { let pallet_parts = if input.peek(Token![::]) && input.peek3(token::Brace) { let _: Token![::] = input.parse()?; Some(parse_pallet_parts(input)?) - } else if !input.peek(keyword::exclude_parts) && - !input.peek(keyword::use_parts) && - !input.peek(Token![=]) && - !input.peek(Token![,]) && - !input.is_empty() + } else if !input.peek(keyword::exclude_parts) + && !input.peek(keyword::use_parts) + && !input.peek(Token![=]) + && !input.peek(Token![,]) + && !input.is_empty() { return Err(input.error( "Unexpected tokens, expected one of `::{`, `exclude_parts`, `use_parts`, `=`, `,`", - )) + )); } else { None }; @@ -266,7 +268,7 @@ impl Parse for PalletDeclaration { let _: keyword::use_parts = input.parse()?; SpecifiedParts::Use(parse_pallet_parts_no_generic(input)?) 
} else if !input.peek(Token![=]) && !input.peek(Token![,]) && !input.is_empty() { - return Err(input.error("Unexpected tokens, expected one of `exclude_parts`, `=`, `,`")) + return Err(input.error("Unexpected tokens, expected one of `exclude_parts`, `=`, `,`")); } else { SpecifiedParts::All }; @@ -278,7 +280,7 @@ impl Parse for PalletDeclaration { let index = index.base10_parse::()?; Some(index) } else if !input.peek(Token![,]) && !input.is_empty() { - return Err(input.error("Unexpected tokens, expected one of `=`, `,`")) + return Err(input.error("Unexpected tokens, expected one of `=`, `,`")); } else { None }; @@ -314,15 +316,15 @@ impl Parse for PalletPath { PalletPath { inner: Path { leading_colon: None, segments: Punctuated::new() } }; let lookahead = input.lookahead1(); - if lookahead.peek(Token![crate]) || - lookahead.peek(Token![self]) || - lookahead.peek(Token![super]) || - lookahead.peek(Ident) + if lookahead.peek(Token![crate]) + || lookahead.peek(Token![self]) + || lookahead.peek(Token![super]) + || lookahead.peek(Ident) { let ident = input.call(Ident::parse_any)?; res.inner.segments.push(ident.into()); } else { - return Err(lookahead.error()) + return Err(lookahead.error()); } while input.peek(Token![::]) && input.peek3(Ident) { @@ -353,7 +355,7 @@ fn parse_pallet_parts(input: ParseStream) -> Result> { "`{}` was already declared before. 
Please remove the duplicate declaration", part.name(), ); - return Err(Error::new(part.keyword.span(), msg)) + return Err(Error::new(part.keyword.span(), msg)); } } @@ -458,7 +460,7 @@ impl Parse for PalletPart { keyword.name(), valid_generics, ); - return Err(syn::Error::new(keyword.span(), msg)) + return Err(syn::Error::new(keyword.span(), msg)); } Ok(Self { keyword, generics }) @@ -519,7 +521,7 @@ fn parse_pallet_parts_no_generic(input: ParseStream) -> Result) -> syn::Result { if pallets.iter().any(|pallet| pallet.pallet_parts.is_none()) { - return Ok(PalletsConversion::Implicit(pallets)) + return Ok(PalletsConversion::Implicit(pallets)); } let mut indices = HashMap::new(); @@ -600,7 +602,7 @@ fn convert_pallets(pallets: Vec) -> syn::Result) -> syn::Result) -> syn::Result + SpecifiedParts::Exclude(parts) | SpecifiedParts::Use(parts) => { for part in parts { if !available_parts.contains(part.keyword.name()) { let msg = format!( @@ -634,9 +636,10 @@ fn convert_pallets(pallets: Vec) -> syn::Result (), } @@ -660,7 +663,7 @@ fn convert_pallets(pallets: Vec) -> syn::Result Error { /// Implementation of the `crate_to_crate_version!` macro. 
pub fn crate_to_crate_version(input: proc_macro::TokenStream) -> Result { if !input.is_empty() { - return Err(create_error("No arguments expected!")) + return Err(create_error("No arguments expected!")); } let major_version = get_cargo_env_var::("CARGO_PKG_VERSION_MAJOR") diff --git a/frame/support/procedural/src/debug_no_bound.rs b/frame/support/procedural/src/debug_no_bound.rs index 56168edb87e83..dae23c539d528 100644 --- a/frame/support/procedural/src/debug_no_bound.rs +++ b/frame/support/procedural/src/debug_no_bound.rs @@ -106,7 +106,7 @@ pub fn derive_debug_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke }, syn::Data::Union(_) => { let msg = "Union type not supported by `derive(DebugNoBound)`"; - return syn::Error::new(input.span(), msg).to_compile_error().into() + return syn::Error::new(input.span(), msg).to_compile_error().into(); }, }; diff --git a/frame/support/procedural/src/default_no_bound.rs b/frame/support/procedural/src/default_no_bound.rs index 192be0786d96b..3f770ef36e53b 100644 --- a/frame/support/procedural/src/default_no_bound.rs +++ b/frame/support/procedural/src/default_no_bound.rs @@ -52,7 +52,7 @@ pub fn derive_default_no_bound(input: proc_macro::TokenStream) -> proc_macro::To quote::quote!(Self) }, }, - syn::Data::Enum(enum_) => + syn::Data::Enum(enum_) => { if let Some(first_variant) = enum_.variants.first() { let variant_ident = &first_variant.ident; match &first_variant.fields { @@ -83,10 +83,11 @@ pub fn derive_default_no_bound(input: proc_macro::TokenStream) -> proc_macro::To } } else { quote::quote!(Self) - }, + } + }, syn::Data::Union(_) => { let msg = "Union type not supported by `derive(CloneNoBound)`"; - return syn::Error::new(input.span(), msg).to_compile_error().into() + return syn::Error::new(input.span(), msg).to_compile_error().into(); }, }; diff --git a/frame/support/procedural/src/dummy_part_checker.rs b/frame/support/procedural/src/dummy_part_checker.rs index 792b17a8f7758..0377fb04e778d 100644 --- 
a/frame/support/procedural/src/dummy_part_checker.rs +++ b/frame/support/procedural/src/dummy_part_checker.rs @@ -5,7 +5,7 @@ pub fn generate_dummy_part_checker(input: TokenStream) -> TokenStream { if !input.is_empty() { return syn::Error::new(proc_macro2::Span::call_site(), "No arguments expected") .to_compile_error() - .into() + .into(); } let count = COUNTER.with(|counter| counter.borrow_mut().inc()); diff --git a/frame/support/procedural/src/key_prefix.rs b/frame/support/procedural/src/key_prefix.rs index 05582f1297eed..086c9f8c8dcf6 100644 --- a/frame/support/procedural/src/key_prefix.rs +++ b/frame/support/procedural/src/key_prefix.rs @@ -23,7 +23,7 @@ const MAX_IDENTS: usize = 18; pub fn impl_key_prefix_for_tuples(input: proc_macro::TokenStream) -> Result { if !input.is_empty() { - return Err(syn::Error::new(Span::call_site(), "No arguments expected")) + return Err(syn::Error::new(Span::call_site(), "No arguments expected")); } let mut all_trait_impls = TokenStream::new(); diff --git a/frame/support/procedural/src/match_and_insert.rs b/frame/support/procedural/src/match_and_insert.rs index 79d1da7549c1d..97630ccbae57c 100644 --- a/frame/support/procedural/src/match_and_insert.rs +++ b/frame/support/procedural/src/match_and_insert.rs @@ -64,14 +64,14 @@ impl syn::parse::Parse for MatchAndInsertDef { let pattern = pattern.parse::()?.into_iter().collect::>(); if let Some(t) = pattern.iter().find(|t| matches!(t, TokenTree::Group(_))) { - return Err(syn::Error::new(t.span(), "Unexpected group token tree")) + return Err(syn::Error::new(t.span(), "Unexpected group token tree")); } if let Some(t) = pattern.iter().find(|t| matches!(t, TokenTree::Literal(_))) { - return Err(syn::Error::new(t.span(), "Unexpected literal token tree")) + return Err(syn::Error::new(t.span(), "Unexpected literal token tree")); } if pattern.is_empty() { - return Err(syn::Error::new(Span::call_site(), "empty match pattern is invalid")) + return Err(syn::Error::new(Span::call_site(), "empty 
match pattern is invalid")); } let mut tokens; @@ -116,7 +116,7 @@ fn expand_in_stream( Ok(s) => { extended.extend(once(TokenTree::Group(Group::new(group.delimiter(), s)))); extended.extend(stream); - return Ok(extended) + return Ok(extended); }, Err(_) => { extended.extend(once(TokenTree::Group(group))); @@ -132,7 +132,7 @@ fn expand_in_stream( extended .extend(once(tokens.take().expect("tokens is used to replace only once"))); extended.extend(stream); - return Ok(extended) + return Ok(extended); } }, } diff --git a/frame/support/procedural/src/pallet/expand/error.rs b/frame/support/procedural/src/pallet/expand/error.rs index 5a8487b09de5c..2df42097f6df9 100644 --- a/frame/support/procedural/src/pallet/expand/error.rs +++ b/frame/support/procedural/src/pallet/expand/error.rs @@ -51,7 +51,7 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { } pub use #error_token_unique_id as tt_error_token; - } + }; }; let error_ident = &error.error; diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs index abed680eb245e..f9ce2b4992c3c 100644 --- a/frame/support/procedural/src/pallet/expand/event.rs +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -55,7 +55,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { #[doc(hidden)] pub use #macro_ident as is_event_part_defined; } - } + }; }; let event_where_clause = &event.where_clause; diff --git a/frame/support/procedural/src/pallet/expand/genesis_build.rs b/frame/support/procedural/src/pallet/expand/genesis_build.rs index d19476779011b..96ffdad8daab7 100644 --- a/frame/support/procedural/src/pallet/expand/genesis_build.rs +++ b/frame/support/procedural/src/pallet/expand/genesis_build.rs @@ -24,7 +24,7 @@ pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { let genesis_config = if let Some(genesis_config) = &def.genesis_config { genesis_config } else { - return Default::default() + return 
Default::default(); }; let genesis_build = def.genesis_build.as_ref().expect("Checked by def parser"); diff --git a/frame/support/procedural/src/pallet/expand/genesis_config.rs b/frame/support/procedural/src/pallet/expand/genesis_config.rs index 739e85e0d1ced..4b2ff94c6887f 100644 --- a/frame/support/procedural/src/pallet/expand/genesis_config.rs +++ b/frame/support/procedural/src/pallet/expand/genesis_config.rs @@ -71,7 +71,7 @@ pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { #[doc(hidden)] pub use #std_macro_ident as is_std_enabled_for_genesis; } - } + }; }; let frame_support = &def.frame_support; @@ -82,9 +82,9 @@ pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { let serde_crate = format!("{}::serde", frame_support); match genesis_config_item { - syn::Item::Enum(syn::ItemEnum { attrs, .. }) | - syn::Item::Struct(syn::ItemStruct { attrs, .. }) | - syn::Item::Type(syn::ItemType { attrs, .. }) => { + syn::Item::Enum(syn::ItemEnum { attrs, .. }) + | syn::Item::Struct(syn::ItemStruct { attrs, .. }) + | syn::Item::Type(syn::ItemType { attrs, .. }) => { if get_doc_literals(attrs).is_empty() { attrs.push(syn::parse_quote!( #[doc = r" diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index 181f35b545496..cc656c342d254 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -62,7 +62,7 @@ fn check_prefix_duplicates( if let Some(other_dup_err) = used_prefixes.insert(prefix.clone(), dup_err.clone()) { let mut err = dup_err; err.combine(other_dup_err); - return Err(err) + return Err(err); } if let Metadata::CountedMap { .. 
} = storage_def.metadata { @@ -79,7 +79,7 @@ fn check_prefix_duplicates( if let Some(other_dup_err) = used_prefixes.insert(counter_prefix, counter_dup_err.clone()) { let mut err = counter_dup_err; err.combine(other_dup_err); - return Err(err) + return Err(err); } } @@ -152,7 +152,7 @@ pub fn process_generics(def: &mut Def) -> syn::Result syn::Result syn::Result= args.args.len() && - matches!(storage_def.query_kind.as_ref(), Some(QueryKind::ResultQuery(_, _))) + if on_empty_idx >= args.args.len() + && matches!(storage_def.query_kind.as_ref(), Some(QueryKind::ResultQuery(_, _))) { let value_ty = match args.args[value_idx].clone() { syn::GenericArgument::Type(ty) => ty, @@ -321,7 +321,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { .filter_map(|storage_def| check_prefix_duplicates(storage_def, &mut prefix_set).err()); if let Some(mut final_error) = errors.next() { errors.for_each(|error| final_error.combine(error)); - return final_error.into_compile_error() + return final_error.into_compile_error(); } let frame_support = &def.frame_support; @@ -374,10 +374,11 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => + QueryKind::ResultQuery(error_path, _) => { quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ), + ) + }, QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -397,10 +398,11 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => + QueryKind::ResultQuery(error_path, _) => { quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ), + ) + }, QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -422,10 +424,11 @@ pub fn 
expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => + QueryKind::ResultQuery(error_path, _) => { quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ), + ) + }, QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -447,10 +450,11 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => + QueryKind::ResultQuery(error_path, _) => { quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ), + ) + }, QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -474,10 +478,11 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => + QueryKind::ResultQuery(error_path, _) => { quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ), + ) + }, QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => diff --git a/frame/support/procedural/src/pallet/mod.rs b/frame/support/procedural/src/pallet/mod.rs index 3f85be81c1f7d..4d7b823178292 100644 --- a/frame/support/procedural/src/pallet/mod.rs +++ b/frame/support/procedural/src/pallet/mod.rs @@ -49,7 +49,7 @@ pub fn pallet( `dev_mode` attribute, such as `#[frame_support::pallet(dev_mode)]` or \ #[pallet(dev_mode)]."; let span = proc_macro2::TokenStream::from(attr).span(); - return syn::Error::new(span, msg).to_compile_error().into() + return syn::Error::new(span, msg).to_compile_error().into(); } } diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index 
fbca9a52c767c..ba9026c758425 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -149,7 +149,7 @@ impl CallDef { let item_impl = if let syn::Item::Impl(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")) + return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")); }; let instances = vec![ @@ -160,7 +160,7 @@ impl CallDef { if let Some((_, _, for_)) = item_impl.trait_ { let msg = "Invalid pallet::call, expected no trait ident as in \ `impl<..> Pallet<..> { .. }`"; - return Err(syn::Error::new(for_.span(), msg)) + return Err(syn::Error::new(for_.span(), msg)); } let mut methods = vec![]; @@ -177,18 +177,18 @@ impl CallDef { _ => method.vis.span(), }; - return Err(syn::Error::new(span, msg)) + return Err(syn::Error::new(span, msg)); } match method.sig.inputs.first() { None => { let msg = "Invalid pallet::call, must have at least origin arg"; - return Err(syn::Error::new(method.sig.span(), msg)) + return Err(syn::Error::new(method.sig.span(), msg)); }, Some(syn::FnArg::Receiver(_)) => { let msg = "Invalid pallet::call, first argument must be a typed argument, \ e.g. 
`origin: OriginFor`"; - return Err(syn::Error::new(method.sig.span(), msg)) + return Err(syn::Error::new(method.sig.span(), msg)); }, Some(syn::FnArg::Typed(arg)) => { check_dispatchable_first_arg_type(&arg.ty)?; @@ -200,7 +200,7 @@ impl CallDef { } else { let msg = "Invalid pallet::call, require return type \ DispatchResultWithPostInfo"; - return Err(syn::Error::new(method.sig.span(), msg)) + return Err(syn::Error::new(method.sig.span(), msg)); } let (mut weight_attrs, mut call_idx_attrs): (Vec, Vec) = @@ -228,7 +228,7 @@ impl CallDef { } else { "Invalid pallet::call, too many weight attributes given" }; - return Err(syn::Error::new(method.sig.span(), msg)) + return Err(syn::Error::new(method.sig.span(), msg)); } let weight = match weight_attrs.pop().unwrap() { FunctionAttr::Weight(w) => w, @@ -237,7 +237,7 @@ impl CallDef { if call_idx_attrs.len() > 1 { let msg = "Invalid pallet::call, too many call_index attributes given"; - return Err(syn::Error::new(method.sig.span(), msg)) + return Err(syn::Error::new(method.sig.span(), msg)); } let call_index = call_idx_attrs.pop().map(|attr| match attr { FunctionAttr::CallIndex(idx) => idx, @@ -246,11 +246,12 @@ impl CallDef { let final_index = match call_index { Some(i) => i, - None => + None => { last_index.map_or(Some(0), |idx| idx.checked_add(1)).ok_or_else(|| { let msg = "Call index doesn't fit into u8, index is 256"; syn::Error::new(method.sig.span(), msg) - })?, + })? 
+ }, }; last_index = Some(final_index); @@ -261,7 +262,7 @@ impl CallDef { ); let mut err = syn::Error::new(used_fn.span(), &msg); err.combine(syn::Error::new(method.sig.ident.span(), msg)); - return Err(err) + return Err(err); } let mut args = vec![]; @@ -277,14 +278,14 @@ impl CallDef { if arg_attrs.len() > 1 { let msg = "Invalid pallet::call, argument has too many attributes"; - return Err(syn::Error::new(arg.span(), msg)) + return Err(syn::Error::new(arg.span(), msg)); } let arg_ident = if let syn::Pat::Ident(pat) = &*arg.pat { pat.ident.clone() } else { let msg = "Invalid pallet::call, argument must be ident"; - return Err(syn::Error::new(arg.pat.span(), msg)) + return Err(syn::Error::new(arg.pat.span(), msg)); }; args.push((!arg_attrs.is_empty(), arg_ident, arg.ty.clone())); @@ -302,7 +303,7 @@ impl CallDef { }); } else { let msg = "Invalid pallet::call, only method accepted"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); } } diff --git a/frame/support/procedural/src/pallet/parse/config.rs b/frame/support/procedural/src/pallet/parse/config.rs index 0f3aa69b170ce..faf4d7d1acada 100644 --- a/frame/support/procedural/src/pallet/parse/config.rs +++ b/frame/support/procedural/src/pallet/parse/config.rs @@ -227,7 +227,7 @@ fn check_event_type( if !type_.generics.params.is_empty() || type_.generics.where_clause.is_some() { let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must have\ no generics nor where_clause"; - return Err(syn::Error::new(trait_item.span(), msg)) + return Err(syn::Error::new(trait_item.span(), msg)); } // Check bound contains IsType and From @@ -242,7 +242,7 @@ fn check_event_type( bound: `IsType<::RuntimeEvent>`", frame_system, ); - return Err(syn::Error::new(type_.span(), msg)) + return Err(syn::Error::new(type_.span(), msg)); } let from_event_bound = type_ @@ -255,7 +255,7 @@ fn check_event_type( } else { let msg = "Invalid `type RuntimeEvent`, associated 
type `RuntimeEvent` is reserved and must \ bound: `From` or `From>` or `From>`"; - return Err(syn::Error::new(type_.span(), msg)) + return Err(syn::Error::new(type_.span(), msg)); }; if from_event_bound.is_generic && (from_event_bound.has_instance != trait_has_instance) @@ -263,7 +263,7 @@ fn check_event_type( let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` bounds inconsistent \ `From`. Config and generic Event must be both with instance or \ without instance"; - return Err(syn::Error::new(type_.span(), msg)) + return Err(syn::Error::new(type_.span(), msg)); } Ok(true) @@ -280,10 +280,12 @@ pub fn replace_self_by_t(input: proc_macro2::TokenStream) -> proc_macro2::TokenS input .into_iter() .map(|token_tree| match token_tree { - proc_macro2::TokenTree::Group(group) => - proc_macro2::Group::new(group.delimiter(), replace_self_by_t(group.stream())).into(), - proc_macro2::TokenTree::Ident(ident) if ident == "Self" => - proc_macro2::Ident::new("T", ident.span()).into(), + proc_macro2::TokenTree::Group(group) => { + proc_macro2::Group::new(group.delimiter(), replace_self_by_t(group.stream())).into() + }, + proc_macro2::TokenTree::Ident(ident) if ident == "Self" => { + proc_macro2::Ident::new("T", ident.span()).into() + }, other => other, }) .collect() @@ -300,12 +302,12 @@ impl ConfigDef { item } else { let msg = "Invalid pallet::config, expected trait definition"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); }; if !matches!(item.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::config, trait must be public"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); } syn::parse2::(item.ident.to_token_stream())?; @@ -320,7 +322,7 @@ impl ConfigDef { if item.generics.params.len() > 1 { let msg = "Invalid pallet::config, expected no more than one generic"; - return Err(syn::Error::new(item.generics.params[2].span(), msg)) + return 
Err(syn::Error::new(item.generics.params[2].span(), msg)); } let has_instance = if item.generics.params.first().is_some() { @@ -342,7 +344,7 @@ impl ConfigDef { if type_attrs_const.len() > 1 { let msg = "Invalid attribute in pallet::config, only one attribute is expected"; - return Err(syn::Error::new(type_attrs_const[1].span(), msg)) + return Err(syn::Error::new(type_attrs_const[1].span(), msg)); } if type_attrs_const.len() == 1 { @@ -355,7 +357,7 @@ impl ConfigDef { let msg = "Invalid pallet::constant in pallet::config, expected type trait \ item"; - return Err(syn::Error::new(trait_item.span(), msg)) + return Err(syn::Error::new(trait_item.span(), msg)); }, } } @@ -392,7 +394,7 @@ impl ConfigDef { To disable this check, use `#[pallet::disable_frame_system_supertrait_check]`", frame_system, found, ); - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); } Ok(Self { index, has_instance, consts_metadata, has_event_type, where_clause, attr_span }) diff --git a/frame/support/procedural/src/pallet/parse/error.rs b/frame/support/procedural/src/pallet/parse/error.rs index c6ce9b37c75a2..15c06706e44ef 100644 --- a/frame/support/procedural/src/pallet/parse/error.rs +++ b/frame/support/procedural/src/pallet/parse/error.rs @@ -55,11 +55,11 @@ impl ErrorDef { let item = if let syn::Item::Enum(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::error, expected item enum")) + return Err(syn::Error::new(item.span(), "Invalid pallet::error, expected item enum")); }; if !matches!(item.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::error, `Error` must be public"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); } let instances = @@ -67,7 +67,7 @@ impl ErrorDef { if item.generics.where_clause.is_some() { let msg = "Invalid pallet::error, where clause is not allowed on pallet error item"; - return 
Err(syn::Error::new(item.generics.where_clause.as_ref().unwrap().span(), msg)) + return Err(syn::Error::new(item.generics.where_clause.as_ref().unwrap().span(), msg)); } let error = syn::parse2::(item.ident.to_token_stream())?; @@ -85,7 +85,7 @@ impl ErrorDef { let msg = "Invalid pallet::error, unexpected discriminant, discriminants \ are not supported"; let span = variant.discriminant.as_ref().unwrap().0.span(); - return Err(syn::Error::new(span, msg)) + return Err(syn::Error::new(span, msg)); } Ok((variant.ident.clone(), field_ty, get_doc_literals(&variant.attrs))) diff --git a/frame/support/procedural/src/pallet/parse/event.rs b/frame/support/procedural/src/pallet/parse/event.rs index e046cacac88e8..5fbd2af711841 100644 --- a/frame/support/procedural/src/pallet/parse/event.rs +++ b/frame/support/procedural/src/pallet/parse/event.rs @@ -87,7 +87,7 @@ impl PalletEventAttrInfo { if deposit_event.is_none() { deposit_event = Some(attr) } else { - return Err(syn::Error::new(attr.span, "Duplicate attribute")) + return Err(syn::Error::new(attr.span, "Duplicate attribute")); } } @@ -104,7 +104,7 @@ impl EventDef { let item = if let syn::Item::Enum(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::event, expected item enum")) + return Err(syn::Error::new(item.span(), "Invalid pallet::event, expected item enum")); }; let event_attrs: Vec = @@ -114,7 +114,7 @@ impl EventDef { if !matches!(item.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::event, `Event` must be public"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); } let where_clause = item.generics.where_clause.clone(); diff --git a/frame/support/procedural/src/pallet/parse/extra_constants.rs b/frame/support/procedural/src/pallet/parse/extra_constants.rs index d8622da08461b..f9592d61f7ee1 100644 --- a/frame/support/procedural/src/pallet/parse/extra_constants.rs +++ 
b/frame/support/procedural/src/pallet/parse/extra_constants.rs @@ -84,7 +84,7 @@ impl ExtraConstantsDef { return Err(syn::Error::new( item.span(), "Invalid pallet::extra_constants, expected item impl", - )) + )); }; let instances = vec![ @@ -95,7 +95,7 @@ impl ExtraConstantsDef { if let Some((_, _, for_)) = item.trait_ { let msg = "Invalid pallet::call, expected no trait ident as in \ `impl<..> Pallet<..> { .. }`"; - return Err(syn::Error::new(for_.span(), msg)) + return Err(syn::Error::new(for_.span(), msg)); } let mut extra_constants = vec![]; @@ -104,28 +104,28 @@ impl ExtraConstantsDef { method } else { let msg = "Invalid pallet::call, only method accepted"; - return Err(syn::Error::new(impl_item.span(), msg)) + return Err(syn::Error::new(impl_item.span(), msg)); }; if !method.sig.inputs.is_empty() { let msg = "Invalid pallet::extra_constants, method must have 0 args"; - return Err(syn::Error::new(method.sig.span(), msg)) + return Err(syn::Error::new(method.sig.span(), msg)); } if !method.sig.generics.params.is_empty() { let msg = "Invalid pallet::extra_constants, method must have 0 generics"; - return Err(syn::Error::new(method.sig.generics.params[0].span(), msg)) + return Err(syn::Error::new(method.sig.generics.params[0].span(), msg)); } if method.sig.generics.where_clause.is_some() { let msg = "Invalid pallet::extra_constants, method must have no where clause"; - return Err(syn::Error::new(method.sig.generics.where_clause.span(), msg)) + return Err(syn::Error::new(method.sig.generics.where_clause.span(), msg)); } let type_ = match &method.sig.output { syn::ReturnType::Default => { let msg = "Invalid pallet::extra_constants, method must have a return type"; - return Err(syn::Error::new(method.span(), msg)) + return Err(syn::Error::new(method.span(), msg)); }, syn::ReturnType::Type(_, type_) => *type_.clone(), }; @@ -137,7 +137,7 @@ impl ExtraConstantsDef { if extra_constant_attrs.len() > 1 { let msg = "Invalid attribute in pallet::constant_name, only one 
attribute is expected"; - return Err(syn::Error::new(extra_constant_attrs[1].metadata_name.span(), msg)) + return Err(syn::Error::new(extra_constant_attrs[1].metadata_name.span(), msg)); } let metadata_name = extra_constant_attrs.pop().map(|attr| attr.metadata_name); diff --git a/frame/support/procedural/src/pallet/parse/genesis_build.rs b/frame/support/procedural/src/pallet/parse/genesis_build.rs index 9815b8d2203c4..6b033fdd0f331 100644 --- a/frame/support/procedural/src/pallet/parse/genesis_build.rs +++ b/frame/support/procedural/src/pallet/parse/genesis_build.rs @@ -40,7 +40,7 @@ impl GenesisBuildDef { item } else { let msg = "Invalid pallet::genesis_build, expected item impl"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); }; let item_trait = &item diff --git a/frame/support/procedural/src/pallet/parse/genesis_config.rs b/frame/support/procedural/src/pallet/parse/genesis_config.rs index 45e765c018aae..e977b831cc32a 100644 --- a/frame/support/procedural/src/pallet/parse/genesis_config.rs +++ b/frame/support/procedural/src/pallet/parse/genesis_config.rs @@ -42,7 +42,7 @@ impl GenesisConfigDef { syn::Item::Struct(item) => (&item.vis, &item.ident, &item.generics), _ => { let msg = "Invalid pallet::genesis_config, expected enum or struct"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); }, }; @@ -60,12 +60,12 @@ impl GenesisConfigDef { if !matches!(vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::genesis_config, GenesisConfig must be public"; - return Err(syn::Error::new(item_span, msg)) + return Err(syn::Error::new(item_span, msg)); } if ident != "GenesisConfig" { let msg = "Invalid pallet::genesis_config, ident must `GenesisConfig`"; - return Err(syn::Error::new(ident.span(), msg)) + return Err(syn::Error::new(ident.span(), msg)); } Ok(GenesisConfigDef { index, genesis_config: ident.clone(), instances, gen_kind }) diff --git 
a/frame/support/procedural/src/pallet/parse/helper.rs b/frame/support/procedural/src/pallet/parse/helper.rs index 8244079173581..c93ac4faca9b1 100644 --- a/frame/support/procedural/src/pallet/parse/helper.rs +++ b/frame/support/procedural/src/pallet/parse/helper.rs @@ -153,7 +153,7 @@ impl syn::parse::Parse for Unit { syn::parenthesized!(content in input); if !content.is_empty() { let msg = "unexpected tokens, expected nothing inside parenthesis as `()`"; - return Err(syn::Error::new(content.span(), msg)) + return Err(syn::Error::new(content.span(), msg)); } Ok(Self) } @@ -166,7 +166,7 @@ impl syn::parse::Parse for StaticLifetime { let lifetime = input.parse::()?; if lifetime.ident != "static" { let msg = "unexpected tokens, expected `static`"; - return Err(syn::Error::new(lifetime.ident.span(), msg)) + return Err(syn::Error::new(lifetime.ident.span(), msg)); } Ok(Self) } @@ -264,7 +264,7 @@ pub fn check_type_def_optional_gen( impl syn::parse::Parse for Checker { fn parse(input: syn::parse::ParseStream) -> syn::Result { if input.is_empty() { - return Ok(Self(None)) + return Ok(Self(None)); } let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; @@ -272,7 +272,7 @@ pub fn check_type_def_optional_gen( input.parse::()?; if input.is_empty() { - return Ok(Self(Some(instance_usage))) + return Ok(Self(Some(instance_usage))); } let lookahead = input.lookahead1(); @@ -289,7 +289,7 @@ pub fn check_type_def_optional_gen( input.parse::()?; if input.is_empty() { - return Ok(Self(Some(instance_usage))) + return Ok(Self(Some(instance_usage))); } instance_usage.has_instance = true; @@ -431,7 +431,7 @@ pub fn check_type_def_gen( input.parse::()?; if input.is_empty() { - return Ok(Self(instance_usage)) + return Ok(Self(instance_usage)); } let lookahead = input.lookahead1(); @@ -448,7 +448,7 @@ pub fn check_type_def_gen( input.parse::()?; if input.is_empty() { - return Ok(Self(instance_usage)) + return Ok(Self(instance_usage)); } 
instance_usage.has_instance = true; @@ -539,7 +539,7 @@ pub fn check_type_value_gen( impl syn::parse::Parse for Checker { fn parse(input: syn::parse::ParseStream) -> syn::Result { if input.is_empty() { - return Ok(Self(None)) + return Ok(Self(None)); } input.parse::()?; @@ -549,7 +549,7 @@ pub fn check_type_value_gen( let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; if input.is_empty() { - return Ok(Self(Some(instance_usage))) + return Ok(Self(Some(instance_usage))); } instance_usage.has_instance = true; diff --git a/frame/support/procedural/src/pallet/parse/hooks.rs b/frame/support/procedural/src/pallet/parse/hooks.rs index 2dc8f4da47c5f..4917f26a5f01b 100644 --- a/frame/support/procedural/src/pallet/parse/hooks.rs +++ b/frame/support/procedural/src/pallet/parse/hooks.rs @@ -42,7 +42,7 @@ impl HooksDef { item } else { let msg = "Invalid pallet::hooks, expected item impl"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); }; let instances = vec![ @@ -67,7 +67,7 @@ impl HooksDef { quote::quote!(#item_trait) ); - return Err(syn::Error::new(item_trait.span(), msg)) + return Err(syn::Error::new(item_trait.span(), msg)); } let has_runtime_upgrade = item.items.iter().any(|i| match i { diff --git a/frame/support/procedural/src/pallet/parse/inherent.rs b/frame/support/procedural/src/pallet/parse/inherent.rs index a485eed4c40d9..6b7e3a402856e 100644 --- a/frame/support/procedural/src/pallet/parse/inherent.rs +++ b/frame/support/procedural/src/pallet/parse/inherent.rs @@ -32,22 +32,22 @@ impl InherentDef { item } else { let msg = "Invalid pallet::inherent, expected item impl"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); }; if item.trait_.is_none() { let msg = "Invalid pallet::inherent, expected impl<..> ProvideInherent for Pallet<..>"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); } if let 
Some(last) = item.trait_.as_ref().unwrap().1.segments.last() { if last.ident != "ProvideInherent" { let msg = "Invalid pallet::inherent, expected trait ProvideInherent"; - return Err(syn::Error::new(last.span(), msg)) + return Err(syn::Error::new(last.span(), msg)); } } else { let msg = "Invalid pallet::inherent, expected impl<..> ProvideInherent for Pallet<..>"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); } let instances = vec![ diff --git a/frame/support/procedural/src/pallet/parse/mod.rs b/frame/support/procedural/src/pallet/parse/mod.rs index f91159248281c..e484b11d54e7d 100644 --- a/frame/support/procedural/src/pallet/parse/mod.rs +++ b/frame/support/procedural/src/pallet/parse/mod.rs @@ -96,8 +96,9 @@ impl Def { let pallet_attr: Option = helper::take_first_item_pallet_attr(item)?; match pallet_attr { - Some(PalletAttr::Config(span)) if config.is_none() => - config = Some(config::ConfigDef::try_from(&frame_system, span, index, item)?), + Some(PalletAttr::Config(span)) if config.is_none() => { + config = Some(config::ConfigDef::try_from(&frame_system, span, index, item)?) + }, Some(PalletAttr::Pallet(span)) if pallet_struct.is_none() => { let p = pallet_struct::PalletStructDef::try_from(span, index, item)?; pallet_struct = Some(p); @@ -106,12 +107,15 @@ impl Def { let m = hooks::HooksDef::try_from(span, index, item)?; hooks = Some(m); }, - Some(PalletAttr::RuntimeCall(span)) if call.is_none() => - call = Some(call::CallDef::try_from(span, index, item, dev_mode)?), - Some(PalletAttr::Error(span)) if error.is_none() => - error = Some(error::ErrorDef::try_from(span, index, item)?), - Some(PalletAttr::RuntimeEvent(span)) if event.is_none() => - event = Some(event::EventDef::try_from(span, index, item)?), + Some(PalletAttr::RuntimeCall(span)) if call.is_none() => { + call = Some(call::CallDef::try_from(span, index, item, dev_mode)?) 
+ }, + Some(PalletAttr::Error(span)) if error.is_none() => { + error = Some(error::ErrorDef::try_from(span, index, item)?) + }, + Some(PalletAttr::RuntimeEvent(span)) if event.is_none() => { + event = Some(event::EventDef::try_from(span, index, item)?) + }, Some(PalletAttr::GenesisConfig(_)) if genesis_config.is_none() => { let g = genesis_config::GenesisConfigDef::try_from(index, item)?; genesis_config = Some(g); @@ -120,24 +124,29 @@ impl Def { let g = genesis_build::GenesisBuildDef::try_from(span, index, item)?; genesis_build = Some(g); }, - Some(PalletAttr::RuntimeOrigin(_)) if origin.is_none() => - origin = Some(origin::OriginDef::try_from(index, item)?), - Some(PalletAttr::Inherent(_)) if inherent.is_none() => - inherent = Some(inherent::InherentDef::try_from(index, item)?), - Some(PalletAttr::Storage(span)) => - storages.push(storage::StorageDef::try_from(span, index, item, dev_mode)?), + Some(PalletAttr::RuntimeOrigin(_)) if origin.is_none() => { + origin = Some(origin::OriginDef::try_from(index, item)?) + }, + Some(PalletAttr::Inherent(_)) if inherent.is_none() => { + inherent = Some(inherent::InherentDef::try_from(index, item)?) + }, + Some(PalletAttr::Storage(span)) => { + storages.push(storage::StorageDef::try_from(span, index, item, dev_mode)?) + }, Some(PalletAttr::ValidateUnsigned(_)) if validate_unsigned.is_none() => { let v = validate_unsigned::ValidateUnsignedDef::try_from(index, item)?; validate_unsigned = Some(v); }, - Some(PalletAttr::TypeValue(span)) => - type_values.push(type_value::TypeValueDef::try_from(span, index, item)?), - Some(PalletAttr::ExtraConstants(_)) => + Some(PalletAttr::TypeValue(span)) => { + type_values.push(type_value::TypeValueDef::try_from(span, index, item)?) + }, + Some(PalletAttr::ExtraConstants(_)) => { extra_constants = - Some(extra_constants::ExtraConstantsDef::try_from(index, item)?), + Some(extra_constants::ExtraConstantsDef::try_from(index, item)?) 
+ }, Some(attr) => { let msg = "Invalid duplicated attribute"; - return Err(syn::Error::new(attr.span(), msg)) + return Err(syn::Error::new(attr.span(), msg)); }, None => (), } @@ -151,7 +160,7 @@ impl Def { genesis_config.as_ref().map_or("unused", |_| "used"), genesis_build.as_ref().map_or("unused", |_| "used"), ); - return Err(syn::Error::new(item_span, msg)) + return Err(syn::Error::new(item_span, msg)); } let def = Def { @@ -239,7 +248,7 @@ impl Def { let mut errors = instances.into_iter().filter_map(|instances| { if instances.has_instance == self.config.has_instance { - return None + return None; } let msg = if self.config.has_instance { "Invalid generic declaration, trait is defined with instance but generic use none" diff --git a/frame/support/procedural/src/pallet/parse/origin.rs b/frame/support/procedural/src/pallet/parse/origin.rs index 89929e3e8dbfc..9bda22c3f2e2d 100644 --- a/frame/support/procedural/src/pallet/parse/origin.rs +++ b/frame/support/procedural/src/pallet/parse/origin.rs @@ -42,7 +42,7 @@ impl OriginDef { syn::Item::Type(item) => (&item.vis, &item.ident, &item.generics), _ => { let msg = "Invalid pallet::origin, expected enum or struct or type"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); }, }; @@ -59,12 +59,12 @@ impl OriginDef { if !matches!(vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::origin, Origin must be public"; - return Err(syn::Error::new(item_span, msg)) + return Err(syn::Error::new(item_span, msg)); } if ident != "Origin" { let msg = "Invalid pallet::origin, ident must `Origin`"; - return Err(syn::Error::new(ident.span(), msg)) + return Err(syn::Error::new(ident.span(), msg)); } Ok(OriginDef { index, has_instance, is_generic, instances }) diff --git a/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/frame/support/procedural/src/pallet/parse/pallet_struct.rs index a96c310b6f1ca..9b79812e0a7c9 100644 --- 
a/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -113,7 +113,7 @@ impl PalletStructDef { item } else { let msg = "Invalid pallet::pallet, expected struct definition"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); }; let mut store = None; @@ -138,7 +138,7 @@ impl PalletStructDef { }, attr => { let msg = "Unexpected duplicated attribute"; - return Err(syn::Error::new(attr.span(), msg)) + return Err(syn::Error::new(attr.span(), msg)); }, } } @@ -147,12 +147,12 @@ impl PalletStructDef { if !matches!(item.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::pallet, Pallet must be public"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); } if item.generics.where_clause.is_some() { let msg = "Invalid pallet::pallet, where clause not supported on Pallet declaration"; - return Err(syn::Error::new(item.generics.where_clause.span(), msg)) + return Err(syn::Error::new(item.generics.where_clause.span(), msg)); } let instances = diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 8b551ab31d6c3..35c2f5412d519 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -49,10 +49,10 @@ pub enum PalletStorageAttr { impl PalletStorageAttr { fn attr_span(&self) -> proc_macro2::Span { match self { - Self::Getter(_, span) | - Self::StorageName(_, span) | - Self::Unbounded(span) | - Self::WhitelistStorage(span) => *span, + Self::Getter(_, span) + | Self::StorageName(_, span) + | Self::Unbounded(span) + | Self::WhitelistStorage(span) => *span, } } } @@ -115,15 +115,17 @@ impl PalletStorageAttrInfo { for attr in attrs { match attr { PalletStorageAttr::Getter(ident, ..) if getter.is_none() => getter = Some(ident), - PalletStorageAttr::StorageName(name, ..) 
if rename_as.is_none() => - rename_as = Some(name), + PalletStorageAttr::StorageName(name, ..) if rename_as.is_none() => { + rename_as = Some(name) + }, PalletStorageAttr::Unbounded(..) if !unbounded => unbounded = true, PalletStorageAttr::WhitelistStorage(..) if !whitelisted => whitelisted = true, - attr => + attr => { return Err(syn::Error::new( attr.attr_span(), "Invalid attribute: Duplicate attribute", - )), + )) + }, } } @@ -238,8 +240,9 @@ impl StorageGenerics { Self::Map { value, key, .. } => Metadata::Map { value, key }, Self::CountedMap { value, key, .. } => Metadata::CountedMap { value, key }, Self::Value { value, .. } => Metadata::Value { value }, - Self::NMap { keygen, value, .. } => - Metadata::NMap { keys: collect_keys(&keygen)?, keygen, value }, + Self::NMap { keygen, value, .. } => { + Metadata::NMap { keys: collect_keys(&keygen)?, keygen, value } + }, }; Ok(res) @@ -248,11 +251,11 @@ impl StorageGenerics { /// Return the query kind from the defined generics fn query_kind(&self) -> Option { match &self { - Self::DoubleMap { query_kind, .. } | - Self::Map { query_kind, .. } | - Self::CountedMap { query_kind, .. } | - Self::Value { query_kind, .. } | - Self::NMap { query_kind, .. } => query_kind.clone(), + Self::DoubleMap { query_kind, .. } + | Self::Map { query_kind, .. } + | Self::CountedMap { query_kind, .. } + | Self::Value { query_kind, .. } + | Self::NMap { query_kind, .. } => query_kind.clone(), } } } @@ -293,8 +296,8 @@ fn check_generics( }; for (gen_name, gen_binding) in map { - if !mandatory_generics.contains(&gen_name.as_str()) && - !optional_generics.contains(&gen_name.as_str()) + if !mandatory_generics.contains(&gen_name.as_str()) + && !optional_generics.contains(&gen_name.as_str()) { let msg = format!( "Invalid pallet::storage, Unexpected generic `{}` for `{}`. 
{}", @@ -339,7 +342,7 @@ fn process_named_generics( let msg = "Invalid pallet::storage, Duplicated named generic"; let mut err = syn::Error::new(arg.ident.span(), msg); err.combine(syn::Error::new(other.ident.span(), msg)); - return Err(err) + return Err(err); } parsed.insert(arg.ident.to_string(), arg.clone()); } @@ -511,8 +514,9 @@ fn process_unnamed_generics( })?; let res = match storage { - StorageKind::Value => - (None, Metadata::Value { value: retrieve_arg(1)? }, retrieve_arg(2).ok()), + StorageKind::Value => { + (None, Metadata::Value { value: retrieve_arg(1)? }, retrieve_arg(2).ok()) + }, StorageKind::Map => ( None, Metadata::Map { key: retrieve_arg(2)?, value: retrieve_arg(3)? }, @@ -559,7 +563,7 @@ fn process_generics( in order to expand metadata, found `{}`.", found, ); - return Err(syn::Error::new(segment.ident.span(), msg)) + return Err(syn::Error::new(segment.ident.span(), msg)); }, }; @@ -570,7 +574,7 @@ fn process_generics( _ => { let msg = "Invalid pallet::storage, invalid number of generic generic arguments, \ expect more that 0 generic arguments."; - return Err(syn::Error::new(segment.span(), msg)) + return Err(syn::Error::new(segment.span(), msg)); }, }; @@ -617,7 +621,7 @@ fn extract_key(ty: &syn::Type) -> syn::Result { typ } else { let msg = "Invalid pallet::storage, expected type path"; - return Err(syn::Error::new(ty.span(), msg)) + return Err(syn::Error::new(ty.span(), msg)); }; let key_struct = typ.path.segments.last().ok_or_else(|| { @@ -626,14 +630,14 @@ fn extract_key(ty: &syn::Type) -> syn::Result { })?; if key_struct.ident != "Key" && key_struct.ident != "NMapKey" { let msg = "Invalid pallet::storage, expected Key or NMapKey struct"; - return Err(syn::Error::new(key_struct.ident.span(), msg)) + return Err(syn::Error::new(key_struct.ident.span(), msg)); } let ty_params = if let syn::PathArguments::AngleBracketed(args) = &key_struct.arguments { args } else { let msg = "Invalid pallet::storage, expected angle bracketed arguments"; - 
return Err(syn::Error::new(key_struct.arguments.span(), msg)) + return Err(syn::Error::new(key_struct.arguments.span(), msg)); }; if ty_params.args.len() != 2 { @@ -642,14 +646,14 @@ fn extract_key(ty: &syn::Type) -> syn::Result { for Key struct, expected 2 args, found {}", ty_params.args.len() ); - return Err(syn::Error::new(ty_params.span(), msg)) + return Err(syn::Error::new(ty_params.span(), msg)); } let key = match &ty_params.args[1] { syn::GenericArgument::Type(key_ty) => key_ty.clone(), _ => { let msg = "Invalid pallet::storage, expected type"; - return Err(syn::Error::new(ty_params.args[1].span(), msg)) + return Err(syn::Error::new(ty_params.args[1].span(), msg)); }, }; @@ -683,7 +687,7 @@ impl StorageDef { let item = if let syn::Item::Type(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::storage, expect item type.")) + return Err(syn::Error::new(item.span(), "Invalid pallet::storage, expect item type.")); }; let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; @@ -703,12 +707,12 @@ impl StorageDef { typ } else { let msg = "Invalid pallet::storage, expected type path"; - return Err(syn::Error::new(item.ty.span(), msg)) + return Err(syn::Error::new(item.ty.span(), msg)); }; if typ.path.segments.len() != 1 { let msg = "Invalid pallet::storage, expected type path with one segment"; - return Err(syn::Error::new(item.ty.span(), msg)) + return Err(syn::Error::new(item.ty.span(), msg)); } let (named_generics, metadata, query_kind) = process_generics(&typ.path.segments[0])?; @@ -727,16 +731,22 @@ impl StorageDef { .segments .last() .map_or(false, |s| s.ident == "OptionQuery") => - return Ok(Some(QueryKind::OptionQuery)), + { + return Ok(Some(QueryKind::OptionQuery)) + }, Type::Path(TypePath { path: Path { segments, .. }, .. 
}) if segments.last().map_or(false, |s| s.ident == "ResultQuery") => + { segments .last() .expect("segments is checked to have the last value; qed") - .clone(), + .clone() + }, Type::Path(path) if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") => - return Ok(Some(QueryKind::ValueQuery)), + { + return Ok(Some(QueryKind::ValueQuery)) + }, _ => return Ok(None), }; @@ -750,7 +760,7 @@ impl StorageDef { for ResultQuery, expected 1 type argument, found {}", args.len(), ); - return Err(syn::Error::new(args.span(), msg)) + return Err(syn::Error::new(args.span(), msg)); } args[0].clone() @@ -761,7 +771,7 @@ impl StorageDef { expected angle-bracketed arguments, found `{}`", args.to_token_stream().to_string() ); - return Err(syn::Error::new(args.span(), msg)) + return Err(syn::Error::new(args.span(), msg)); }, }; @@ -777,7 +787,7 @@ impl StorageDef { segments, found {}", err_variant.len(), ); - return Err(syn::Error::new(err_variant.span(), msg)) + return Err(syn::Error::new(err_variant.span(), msg)); } let mut error = err_variant.clone(); let err_variant = error @@ -813,7 +823,7 @@ impl StorageDef { let msg = "Invalid pallet::storage, cannot generate getter because QueryKind is not \ identifiable. 
QueryKind must be `OptionQuery`, `ResultQuery`, `ValueQuery`, or default \ one to be identifiable."; - return Err(syn::Error::new(getter.span(), msg)) + return Err(syn::Error::new(getter.span(), msg)); } Ok(StorageDef { diff --git a/frame/support/procedural/src/pallet/parse/type_value.rs b/frame/support/procedural/src/pallet/parse/type_value.rs index a3d004cd8a532..cd1bcd0b48ee7 100644 --- a/frame/support/procedural/src/pallet/parse/type_value.rs +++ b/frame/support/procedural/src/pallet/parse/type_value.rs @@ -52,7 +52,7 @@ impl TypeValueDef { item } else { let msg = "Invalid pallet::type_value, expected item fn"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); }; let mut docs = vec![]; @@ -60,13 +60,13 @@ impl TypeValueDef { if let Ok(syn::Meta::NameValue(meta)) = attr.parse_meta() { if meta.path.get_ident().map_or(false, |ident| ident == "doc") { docs.push(meta.lit); - continue + continue; } } let msg = "Invalid pallet::type_value, unexpected attribute, only doc attribute are \ allowed"; - return Err(syn::Error::new(attr.span(), msg)) + return Err(syn::Error::new(attr.span(), msg)); } if let Some(span) = item @@ -80,12 +80,12 @@ impl TypeValueDef { .or_else(|| item.sig.variadic.as_ref().map(|t| t.span())) { let msg = "Invalid pallet::type_value, unexpected token"; - return Err(syn::Error::new(span, msg)) + return Err(syn::Error::new(span, msg)); } if !item.sig.inputs.is_empty() { let msg = "Invalid pallet::type_value, unexpected argument"; - return Err(syn::Error::new(item.sig.inputs[0].span(), msg)) + return Err(syn::Error::new(item.sig.inputs[0].span(), msg)); } let vis = item.vis.clone(); @@ -95,7 +95,7 @@ impl TypeValueDef { syn::ReturnType::Type(_, type_) => type_, syn::ReturnType::Default => { let msg = "Invalid pallet::type_value, expected return type"; - return Err(syn::Error::new(item.sig.span(), msg)) + return Err(syn::Error::new(item.sig.span(), msg)); }, }; diff --git 
a/frame/support/procedural/src/pallet/parse/validate_unsigned.rs b/frame/support/procedural/src/pallet/parse/validate_unsigned.rs index 18d5a2dc4443f..e5cda19fc0b4e 100644 --- a/frame/support/procedural/src/pallet/parse/validate_unsigned.rs +++ b/frame/support/procedural/src/pallet/parse/validate_unsigned.rs @@ -32,24 +32,24 @@ impl ValidateUnsignedDef { item } else { let msg = "Invalid pallet::validate_unsigned, expected item impl"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); }; if item.trait_.is_none() { let msg = "Invalid pallet::validate_unsigned, expected impl<..> ValidateUnsigned for \ Pallet<..>"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); } if let Some(last) = item.trait_.as_ref().unwrap().1.segments.last() { if last.ident != "ValidateUnsigned" { let msg = "Invalid pallet::validate_unsigned, expected trait ValidateUnsigned"; - return Err(syn::Error::new(last.span(), msg)) + return Err(syn::Error::new(last.span(), msg)); } } else { let msg = "Invalid pallet::validate_unsigned, expected impl<..> ValidateUnsigned for \ Pallet<..>"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)); } let instances = vec![ diff --git a/frame/support/procedural/src/pallet_error.rs b/frame/support/procedural/src/pallet_error.rs index 216168131e43d..60cf4006a7b8e 100644 --- a/frame/support/procedural/src/pallet_error.rs +++ b/frame/support/procedural/src/pallet_error.rs @@ -35,8 +35,8 @@ pub fn derive_pallet_error(input: proc_macro::TokenStream) -> proc_macro::TokenS let max_encoded_size = match data { syn::Data::Struct(syn::DataStruct { fields, .. }) => match fields { - syn::Fields::Named(syn::FieldsNamed { named: fields, .. }) | - syn::Fields::Unnamed(syn::FieldsUnnamed { unnamed: fields, .. }) => { + syn::Fields::Named(syn::FieldsNamed { named: fields, .. }) + | syn::Fields::Unnamed(syn::FieldsUnnamed { unnamed: fields, .. 
}) => { let maybe_field_tys = fields .iter() .map(|f| generate_field_types(f, &frame_support)) @@ -94,7 +94,7 @@ pub fn derive_pallet_error(input: proc_macro::TokenStream) -> proc_macro::TokenS }, syn::Data::Union(syn::DataUnion { union_token, .. }) => { let msg = "Cannot derive `PalletError` for union; please implement it directly"; - return syn::Error::new(union_token.span, msg).into_compile_error().into() + return syn::Error::new(union_token.span, msg).into_compile_error().into(); }, }; @@ -127,13 +127,15 @@ fn generate_field_types( { syn::NestedMeta::Meta(syn::Meta::Path(path)) if path.get_ident().map_or(false, |i| i == "skip") => - return Ok(None), + { + return Ok(None) + }, syn::NestedMeta::Meta(syn::Meta::Path(path)) if path.get_ident().map_or(false, |i| i == "compact") => { let field_ty = &field.ty; - return Ok(Some(quote::quote!(#scrate::codec::Compact<#field_ty>))) + return Ok(Some(quote::quote!(#scrate::codec::Compact<#field_ty>))); }, syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue { @@ -142,7 +144,7 @@ fn generate_field_types( .. })) if path.get_ident().map_or(false, |i| i == "encoded_as") => { let ty = proc_macro2::TokenStream::from_str(&lit_str.value())?; - return Ok(Some(ty)) + return Ok(Some(ty)); }, _ => (), @@ -173,7 +175,9 @@ fn generate_variant_field_types( { syn::NestedMeta::Meta(syn::Meta::Path(path)) if path.get_ident().map_or(false, |i| i == "skip") => - return Ok(None), + { + return Ok(None) + }, _ => (), } @@ -184,8 +188,8 @@ fn generate_variant_field_types( } match &variant.fields { - syn::Fields::Named(syn::FieldsNamed { named: fields, .. }) | - syn::Fields::Unnamed(syn::FieldsUnnamed { unnamed: fields, .. }) => { + syn::Fields::Named(syn::FieldsNamed { named: fields, .. }) + | syn::Fields::Unnamed(syn::FieldsUnnamed { unnamed: fields, .. 
}) => { let field_tys = fields .iter() .map(|field| generate_field_types(field, scrate)) diff --git a/frame/support/procedural/src/partial_eq_no_bound.rs b/frame/support/procedural/src/partial_eq_no_bound.rs index 31930c0c3dae3..9e5ace6225d3c 100644 --- a/frame/support/procedural/src/partial_eq_no_bound.rs +++ b/frame/support/procedural/src/partial_eq_no_bound.rs @@ -122,7 +122,7 @@ pub fn derive_partial_eq_no_bound(input: proc_macro::TokenStream) -> proc_macro: }, syn::Data::Union(_) => { let msg = "Union type not supported by `derive(PartialEqNoBound)`"; - return syn::Error::new(input.span(), msg).to_compile_error().into() + return syn::Error::new(input.span(), msg).to_compile_error().into(); }, }; diff --git a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs index d24e50096f25e..474235bfff3e8 100644 --- a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs +++ b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs @@ -138,7 +138,7 @@ impl GenesisConfigDef { return Err(syn::Error::new( meta.span(), "extra genesis config items do not support `cfg` attribute", - )) + )); } Ok(meta) }) diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs index e8e2d7529cb3f..3db2d79c98460 100644 --- a/frame/support/procedural/src/storage/mod.rs +++ b/frame/support/procedural/src/storage/mod.rs @@ -259,20 +259,24 @@ impl StorageLineDefExt { hidden_crate: &proc_macro2::TokenStream, ) -> Self { let is_generic = match &storage_def.storage_type { - StorageLineTypeDef::Simple(value) => - ext::type_contains_ident(value, &def.module_runtime_generic), - StorageLineTypeDef::Map(map) => - ext::type_contains_ident(&map.key, &def.module_runtime_generic) || - ext::type_contains_ident(&map.value, &def.module_runtime_generic), - StorageLineTypeDef::DoubleMap(map) => - ext::type_contains_ident(&map.key1, 
&def.module_runtime_generic) || - ext::type_contains_ident(&map.key2, &def.module_runtime_generic) || - ext::type_contains_ident(&map.value, &def.module_runtime_generic), - StorageLineTypeDef::NMap(map) => + StorageLineTypeDef::Simple(value) => { + ext::type_contains_ident(value, &def.module_runtime_generic) + }, + StorageLineTypeDef::Map(map) => { + ext::type_contains_ident(&map.key, &def.module_runtime_generic) + || ext::type_contains_ident(&map.value, &def.module_runtime_generic) + }, + StorageLineTypeDef::DoubleMap(map) => { + ext::type_contains_ident(&map.key1, &def.module_runtime_generic) + || ext::type_contains_ident(&map.key2, &def.module_runtime_generic) + || ext::type_contains_ident(&map.value, &def.module_runtime_generic) + }, + StorageLineTypeDef::NMap(map) => { map.keys .iter() - .any(|key| ext::type_contains_ident(key, &def.module_runtime_generic)) || - ext::type_contains_ident(&map.value, &def.module_runtime_generic), + .any(|key| ext::type_contains_ident(key, &def.module_runtime_generic)) + || ext::type_contains_ident(&map.value, &def.module_runtime_generic) + }, }; let query_type = match &storage_def.storage_type { @@ -395,7 +399,7 @@ impl NMapDef { if self.keys.len() == 1 { let hasher = &self.hashers[0].to_storage_hasher_struct(); let key = &self.keys[0]; - return quote!( #scrate::storage::types::Key<#scrate::#hasher, #key> ) + return quote!( #scrate::storage::types::Key<#scrate::#hasher, #key> ); } let key_hasher = self @@ -413,7 +417,7 @@ impl NMapDef { fn to_key_tuple(&self) -> proc_macro2::TokenStream { if self.keys.len() == 1 { let key = &self.keys[0]; - return quote!(#key) + return quote!(#key); } let tuple = self.keys.iter().map(|key| quote!(#key)).collect::>(); diff --git a/frame/support/procedural/src/storage/parse.rs b/frame/support/procedural/src/storage/parse.rs index 54026b7d78b19..a3b21c829eb3e 100644 --- a/frame/support/procedural/src/storage/parse.rs +++ b/frame/support/procedural/src/storage/parse.rs @@ -367,16 +367,17 @@ fn 
get_module_instance( it is now defined at frame_support::traits::Instance. Expect `Instance` found `{}`", instantiable.as_ref().unwrap(), ); - return Err(syn::Error::new(instantiable.span(), msg)) + return Err(syn::Error::new(instantiable.span(), msg)); } match (instance, instantiable, default_instance) { - (Some(instance), Some(instantiable), default_instance) => + (Some(instance), Some(instantiable), default_instance) => { Ok(Some(super::ModuleInstanceDef { instance_generic: instance, instance_trait: instantiable, instance_default: default_instance, - })), + })) + }, (None, None, None) => Ok(None), (Some(instance), None, _) => Err(syn::Error::new( instance.span(), @@ -430,7 +431,7 @@ pub fn parse(input: syn::parse::ParseStream) -> syn::Result - line.max_values.inner.map(|i| i.expr.content), - DeclStorageType::Simple(_) => + DeclStorageType::Map(_) | DeclStorageType::DoubleMap(_) | DeclStorageType::NMap(_) => { + line.max_values.inner.map(|i| i.expr.content) + }, + DeclStorageType::Simple(_) => { if let Some(max_values) = line.max_values.inner { let msg = "unexpected max_values attribute for storage value."; let span = max_values.max_values_keyword.span(); - return Err(syn::Error::new(span, msg)) + return Err(syn::Error::new(span, msg)); } else { Some(syn::parse_quote!(1u32)) - }, + } + }, }; let span = line.storage_type.span(); @@ -524,14 +527,15 @@ fn parse_storage_line_defs( key: map.key, value: map.value, }), - DeclStorageType::DoubleMap(map) => + DeclStorageType::DoubleMap(map) => { super::StorageLineTypeDef::DoubleMap(Box::new(super::DoubleMapDef { hasher1: map.hasher1.inner.ok_or_else(no_hasher_error)?.into(), hasher2: map.hasher2.inner.ok_or_else(no_hasher_error)?.into(), key1: map.key1, key2: map.key2, value: map.value, - })), + })) + }, DeclStorageType::NMap(map) => super::StorageLineTypeDef::NMap(super::NMapDef { hashers: map .storage_keys diff --git a/frame/support/procedural/src/storage/print_pallet_upgrade.rs 
b/frame/support/procedural/src/storage/print_pallet_upgrade.rs index 03f09a7edb48e..49e8fd93a98b8 100644 --- a/frame/support/procedural/src/storage/print_pallet_upgrade.rs +++ b/frame/support/procedural/src/storage/print_pallet_upgrade.rs @@ -26,7 +26,7 @@ fn to_cleaned_string(t: impl quote::ToTokens) -> String { /// Print an incomplete upgrade from decl_storage macro to new pallet attribute. pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { if !check_print_pallet_upgrade() { - return + return; } let scrate = "e::quote!(frame_support); @@ -58,7 +58,7 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { Ok(g) => g, Err(err) => { println!("Could not print upgrade due compile error: {:?}", err); - return + return; }, }; diff --git a/frame/support/procedural/src/storage_alias.rs b/frame/support/procedural/src/storage_alias.rs index e0df0123595b9..33630b706e084 100644 --- a/frame/support/procedural/src/storage_alias.rs +++ b/frame/support/procedural/src/storage_alias.rs @@ -294,20 +294,20 @@ impl StorageType { /// The prefix for this storage type. fn prefix(&self) -> &SimplePath { match self { - Self::Value { prefix, .. } | - Self::Map { prefix, .. } | - Self::NMap { prefix, .. } | - Self::DoubleMap { prefix, .. } => prefix, + Self::Value { prefix, .. } + | Self::Map { prefix, .. } + | Self::NMap { prefix, .. } + | Self::DoubleMap { prefix, .. } => prefix, } } /// The prefix generics for this storage type. fn prefix_generics(&self) -> Option<&TypeGenerics> { match self { - Self::Value { prefix_generics, .. } | - Self::Map { prefix_generics, .. } | - Self::NMap { prefix_generics, .. } | - Self::DoubleMap { prefix_generics, .. } => prefix_generics.as_ref(), + Self::Value { prefix_generics, .. } + | Self::Map { prefix_generics, .. } + | Self::NMap { prefix_generics, .. } + | Self::DoubleMap { prefix_generics, .. 
} => prefix_generics.as_ref(), } } } @@ -431,7 +431,7 @@ impl Parse for Input { } else if lookahead.peek(Token![=]) { None } else { - return Err(lookahead.error()) + return Err(lookahead.error()); }; let lookahead = input.lookahead1(); @@ -440,7 +440,7 @@ impl Parse for Input { } else if lookahead.peek(Token![=]) { None } else { - return Err(lookahead.error()) + return Err(lookahead.error()); }; let _equal = input.parse()?; @@ -513,7 +513,7 @@ fn generate_storage_instance( visibility: &Visibility, ) -> Result { if let Some(ident) = prefix.get_ident().filter(|i| *i == "_") { - return Err(Error::new(ident.span(), "`_` is not allowed as prefix by `storage_alias`.")) + return Err(Error::new(ident.span(), "`_` is not allowed as prefix by `storage_alias`.")); } let (pallet_prefix, impl_generics, type_generics) = @@ -541,7 +541,7 @@ fn generate_storage_instance( return Err(Error::new_spanned( prefix, "If there are no generics, the prefix is only allowed to be an identifier.", - )) + )); }; let where_clause = storage_where_clause.map(|w| quote!(#w)).unwrap_or_default(); diff --git a/frame/support/procedural/tools/src/syn_ext.rs b/frame/support/procedural/tools/src/syn_ext.rs index 1a2d7c1d372ad..31163b63fcf6c 100644 --- a/frame/support/procedural/tools/src/syn_ext.rs +++ b/frame/support/procedural/tools/src/syn_ext.rs @@ -171,7 +171,7 @@ pub fn extract_type_option(typ: &syn::Type) -> Option { // Option has only one type argument in angle bracket. if let syn::PathArguments::AngleBracketed(a) = &v.arguments { if let syn::GenericArgument::Type(typ) = a.args.last()? 
{ - return Some(typ.clone()) + return Some(typ.clone()); } } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index d497a672e2970..4e4faf28856e2 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -460,7 +460,7 @@ pub trait WeighData { impl WeighData for Weight { fn weigh_data(&self, _: T) -> Weight { - return *self + return *self; } } @@ -472,13 +472,13 @@ impl PaysFee for (Weight, DispatchClass, Pays) { impl WeighData for (Weight, DispatchClass) { fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) + return self.0.weigh_data(args); } } impl WeighData for (Weight, DispatchClass, Pays) { fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) + return self.0.weigh_data(args); } } @@ -496,7 +496,7 @@ impl PaysFee for (Weight, DispatchClass) { impl WeighData for (Weight, Pays) { fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) + return self.0.weigh_data(args); } } @@ -580,13 +580,13 @@ impl PaysFee for u64 { impl WeighData for u64 { fn weigh_data(&self, _: T) -> Weight { - return Weight::zero().set_ref_time(*self) + return Weight::zero().set_ref_time(*self); } } impl WeighData for (u64, DispatchClass, Pays) { fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) + return self.0.weigh_data(args); } } @@ -604,7 +604,7 @@ impl PaysFee for (u64, DispatchClass, Pays) { impl WeighData for (u64, DispatchClass) { fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) + return self.0.weigh_data(args); } } @@ -622,7 +622,7 @@ impl PaysFee for (u64, DispatchClass) { impl WeighData for (u64, Pays) { fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) + return self.0.weigh_data(args); } } @@ -3258,7 +3258,7 @@ mod tests { fn index() -> Option { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::() { - return Some(0) + return Some(0); } None @@ -3266,7 +3266,7 @@ mod tests { fn name() -> Option<&'static str> { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::() { - return Some("Test") + return Some("Test"); } None @@ -3274,7 +3274,7 @@ mod tests { fn module_name() -> Option<&'static str> { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::() { - return Some("tests") + return Some("tests"); } None @@ -3282,7 +3282,7 @@ mod tests { fn crate_version() -> Option { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::() { - return Some(frame_support::crate_to_crate_version!()) + return Some(frame_support::crate_to_crate_version!()); } None diff --git a/frame/support/src/hash.rs b/frame/support/src/hash.rs index 9bdad6d9d59de..abfaf72e4dc16 100644 --- a/frame/support/src/hash.rs +++ b/frame/support/src/hash.rs @@ -111,7 +111,7 @@ impl ReversibleStorageHasher for Twox64Concat { fn reverse(x: &[u8]) -> &[u8] { if x.len() < 8 { log::error!("Invalid reverse: hash length too short"); - return &[] + return &[]; } &x[8..] } @@ -133,7 +133,7 @@ impl ReversibleStorageHasher for Blake2_128Concat { fn reverse(x: &[u8]) -> &[u8] { if x.len() < 16 { log::error!("Invalid reverse: hash length too short"); - return &[] + return &[]; } &x[16..] } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 84e416e50544d..5da391fd7b654 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -685,7 +685,7 @@ pub use frame_support_procedural::crate_to_crate_version; #[macro_export] macro_rules! fail { ( $y:expr ) => {{ - return Err($y.into()) + return Err($y.into()); }}; } diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index 397daaa82a677..89b5dfcbe7b95 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -112,8 +112,9 @@ pub fn take_or_else T>( /// Check to see if `key` has an explicit entry in storage. 
pub fn exists(child_info: &ChildInfo, key: &[u8]) -> bool { match child_info.child_type() { - ChildType::ParentKeyId => - sp_io::default_child_storage::exists(child_info.storage_key(), key), + ChildType::ParentKeyId => { + sp_io::default_child_storage::exists(child_info.storage_key(), key) + }, } } @@ -139,8 +140,9 @@ pub fn exists(child_info: &ChildInfo, key: &[u8]) -> bool { #[deprecated = "Use `clear_storage` instead"] pub fn kill_storage(child_info: &ChildInfo, limit: Option) -> KillStorageResult { match child_info.child_type() { - ChildType::ParentKeyId => - sp_io::default_child_storage::storage_kill(child_info.storage_key(), limit), + ChildType::ParentKeyId => { + sp_io::default_child_storage::storage_kill(child_info.storage_key(), limit) + }, } } @@ -185,8 +187,9 @@ pub fn clear_storage( // enabled. // sp_io::default_child_storage::storage_kill(prefix, maybe_limit, maybe_cursor) let r = match child_info.child_type() { - ChildType::ParentKeyId => - sp_io::default_child_storage::storage_kill(child_info.storage_key(), maybe_limit), + ChildType::ParentKeyId => { + sp_io::default_child_storage::storage_kill(child_info.storage_key(), maybe_limit) + }, }; use sp_io::KillStorageResult::*; let (maybe_cursor, backend) = match r { @@ -215,16 +218,18 @@ pub fn get_raw(child_info: &ChildInfo, key: &[u8]) -> Option> { /// Put a raw byte slice into storage. pub fn put_raw(child_info: &ChildInfo, key: &[u8], value: &[u8]) { match child_info.child_type() { - ChildType::ParentKeyId => - sp_io::default_child_storage::set(child_info.storage_key(), key, value), + ChildType::ParentKeyId => { + sp_io::default_child_storage::set(child_info.storage_key(), key, value) + }, } } /// Calculate current child root value. 
pub fn root(child_info: &ChildInfo, version: StateVersion) -> Vec { match child_info.child_type() { - ChildType::ParentKeyId => - sp_io::default_child_storage::root(child_info.storage_key(), version), + ChildType::ParentKeyId => { + sp_io::default_child_storage::root(child_info.storage_key(), version) + }, } } diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index c95dcee9d7e5c..77d925917cd5f 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -473,7 +473,7 @@ where Some(value) => value, None => { log::error!("Invalid translate: fail to decode old value"); - continue + continue; }, }; let mut key_material = G::Hasher1::reverse(&previous_key[prefix.len()..]); @@ -481,7 +481,7 @@ where Ok(key1) => key1, Err(_) => { log::error!("Invalid translate: fail to decode key1"); - continue + continue; }, }; @@ -490,7 +490,7 @@ where Ok(key2) => key2, Err(_) => { log::error!("Invalid translate: fail to decode key2"); - continue + continue; }, }; diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index f6c8eaa270bb3..beb6805f2be77 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -115,7 +115,7 @@ impl Iter } }, None => None, - } + }; } } } @@ -188,7 +188,7 @@ where Some(value) => value, None => { log::error!("Invalid translate: fail to decode old value"); - continue + continue; }, }; @@ -197,7 +197,7 @@ where Ok(key) => key, Err(_) => { log::error!("Invalid translate: fail to decode key"); - continue + continue; }, }; diff --git a/frame/support/src/storage/generator/nmap.rs b/frame/support/src/storage/generator/nmap.rs index 79f3d72044e28..6162cec1f0a16 100755 --- a/frame/support/src/storage/generator/nmap.rs +++ b/frame/support/src/storage/generator/nmap.rs @@ -430,7 +430,7 @@ impl> Some(value) => value, None => { log::error!("Invalid 
translate: fail to decode old value"); - continue + continue; }, }; @@ -438,7 +438,7 @@ impl> Ok((final_key, _)) => final_key, Err(_) => { log::error!("Invalid translate: fail to decode key"); - continue + continue; }, }; diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index 67001fc4e1f42..f30f866a9bc77 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -87,7 +87,7 @@ impl Iterator for StorageIterator { } }, None => None, - } + }; } } } @@ -160,7 +160,7 @@ impl Iterator } }, None => None, - } + }; } } } @@ -372,7 +372,7 @@ pub fn move_pallet(old_pallet_name: &[u8], new_pallet_name: &[u8]) { /// NOTE: The value at the key `from_prefix` is not moved. pub fn move_prefix(from_prefix: &[u8], to_prefix: &[u8]) { if from_prefix == to_prefix { - return + return; } let iter = PrefixIterator::<_> { diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 333f4382557b1..d20fffc8345a5 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -912,7 +912,7 @@ impl Iterator for PrefixIterator Iterator for PrefixIterator None, - } + }; } } } @@ -1010,12 +1010,12 @@ impl Iterator for KeyPrefixIterator { Ok(item) => return Some(item), Err(e) => { log::error!("key failed to decode at {:?}: {:?}", self.previous_key, e); - continue + continue; }, } } - return None + return None; } } } @@ -1125,7 +1125,7 @@ impl Iterator for ChildTriePrefixIterator { "next_key returned a key with no value at {:?}", self.previous_key, ); - continue + continue; }, }; if self.drain { @@ -1140,14 +1140,14 @@ impl Iterator for ChildTriePrefixIterator { self.previous_key, e, ); - continue + continue; }, }; Some(item) }, None => None, - } + }; } } } @@ -1255,7 +1255,7 @@ pub trait StoragePrefixedMap { }, None => { log::error!("old key failed to decode at {:?}", previous_key); - continue + continue; }, } } diff --git 
a/frame/support/src/storage/storage_noop_guard.rs b/frame/support/src/storage/storage_noop_guard.rs index 7186c3eaf467a..6a5804723dce0 100644 --- a/frame/support/src/storage/storage_noop_guard.rs +++ b/frame/support/src/storage/storage_noop_guard.rs @@ -49,7 +49,7 @@ impl Drop for StorageNoopGuard { fn drop(&mut self) { // No need to double panic, eg. inside a test assertion failure. if sp_std::thread::panicking() { - return + return; } assert_eq!( frame_support::storage_root(frame_support::StateVersion::V1), diff --git a/frame/support/src/storage/transactional.rs b/frame/support/src/storage/transactional.rs index 909d5909ed8bd..d283a434a0f82 100644 --- a/frame/support/src/storage/transactional.rs +++ b/frame/support/src/storage/transactional.rs @@ -57,7 +57,7 @@ fn kill_transaction_level() { fn inc_transaction_level() -> Result { let existing_levels = get_transaction_level(); if existing_levels >= TRANSACTIONAL_LIMIT { - return Err(()) + return Err(()); } // Cannot overflow because of check above. 
set_transaction_level(existing_levels + 1); @@ -232,7 +232,7 @@ mod tests { fn recursive_transactional(num: u32) -> DispatchResult { if num == 0 { - return Ok(()) + return Ok(()); } with_transaction(|| -> TransactionOutcome { diff --git a/frame/support/src/traits/tokens/fungible/balanced.rs b/frame/support/src/traits/tokens/fungible/balanced.rs index 0e75ccc22d050..ed1a032283bf5 100644 --- a/frame/support/src/traits/tokens/fungible/balanced.rs +++ b/frame/support/src/traits/tokens/fungible/balanced.rs @@ -165,7 +165,7 @@ pub trait Unbalanced: Inspect { ) -> Result { let old_balance = Self::balance(who); let (mut new_balance, mut amount) = if Self::reducible_balance(who, false) < amount { - return Err(TokenError::NoFunds.into()) + return Err(TokenError::NoFunds.into()); } else { (old_balance - amount, amount) }; @@ -226,7 +226,7 @@ pub trait Unbalanced: Inspect { let old_balance = Self::balance(who); let new_balance = old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)?; if new_balance < Self::minimum_balance() { - return Err(TokenError::BelowMinimum.into()) + return Err(TokenError::BelowMinimum.into()); } if old_balance != new_balance { Self::set_balance(who, new_balance)?; diff --git a/frame/support/src/traits/tokens/fungibles/balanced.rs b/frame/support/src/traits/tokens/fungibles/balanced.rs index 9e50ff834a874..f1b9144093690 100644 --- a/frame/support/src/traits/tokens/fungibles/balanced.rs +++ b/frame/support/src/traits/tokens/fungibles/balanced.rs @@ -186,7 +186,7 @@ pub trait Unbalanced: Inspect { ) -> Result { let old_balance = Self::balance(asset, who); let (mut new_balance, mut amount) = if Self::reducible_balance(asset, who, false) < amount { - return Err(TokenError::NoFunds.into()) + return Err(TokenError::NoFunds.into()); } else { (old_balance - amount, amount) }; @@ -252,7 +252,7 @@ pub trait Unbalanced: Inspect { let old_balance = Self::balance(asset, who); let new_balance = 
old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)?; if new_balance < Self::minimum_balance(asset) { - return Err(TokenError::BelowMinimum.into()) + return Err(TokenError::BelowMinimum.into()); } if old_balance != new_balance { Self::set_balance(asset, who, new_balance)?; diff --git a/frame/support/src/traits/tokens/imbalance.rs b/frame/support/src/traits/tokens/imbalance.rs index d721beb41494c..25c8503131fba 100644 --- a/frame/support/src/traits/tokens/imbalance.rs +++ b/frame/support/src/traits/tokens/imbalance.rs @@ -83,7 +83,7 @@ pub trait Imbalance: Sized + TryDrop + Default { { let total: u32 = first.saturating_add(second); if total == 0 { - return (Self::zero(), Self::zero()) + return (Self::zero(), Self::zero()); } let amount1 = self.peek().saturating_mul(first.into()) / total.into(); self.split(amount1) diff --git a/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs b/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs index f969a4363405a..dfa27068c7cb1 100644 --- a/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs +++ b/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs @@ -54,10 +54,12 @@ impl< /// both. 
pub fn merge(self, other: Self) -> Self { match (self, other) { - (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => - SignedImbalance::Positive(one.merge(other)), - (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => - SignedImbalance::Negative(one.merge(other)), + (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => { + SignedImbalance::Positive(one.merge(other)) + }, + (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => { + SignedImbalance::Negative(one.merge(other)) + }, (SignedImbalance::Positive(one), SignedImbalance::Negative(other)) => { match one.offset(other) { SameOrOther::Same(positive) => SignedImbalance::Positive(positive), diff --git a/frame/support/src/traits/try_runtime.rs b/frame/support/src/traits/try_runtime.rs index 640bb566a65af..cf169426aa996 100644 --- a/frame/support/src/traits/try_runtime.rs +++ b/frame/support/src/traits/try_runtime.rs @@ -66,7 +66,7 @@ impl sp_std::str::FromStr for Select { match s { "all" | "All" => Ok(Select::All), "none" | "None" => Ok(Select::None), - _ => + _ => { if s.starts_with("rr-") { let count = s .split_once('-') @@ -76,7 +76,8 @@ impl sp_std::str::FromStr for Select { } else { let pallets = s.split(',').map(|x| x.as_bytes().to_vec()).collect::>(); Ok(Select::Only(pallets)) - }, + } + }, } } } diff --git a/frame/support/test/tests/construct_runtime_ui.rs b/frame/support/test/tests/construct_runtime_ui.rs index 42fd87ca95c0e..d643eaeac7e37 100644 --- a/frame/support/test/tests/construct_runtime_ui.rs +++ b/frame/support/test/tests/construct_runtime_ui.rs @@ -21,7 +21,7 @@ fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return + return; } // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
diff --git a/frame/support/test/tests/decl_module_ui.rs b/frame/support/test/tests/decl_module_ui.rs index 292451335e7ea..9328b7b63cd1e 100644 --- a/frame/support/test/tests/decl_module_ui.rs +++ b/frame/support/test/tests/decl_module_ui.rs @@ -21,7 +21,7 @@ fn decl_module_ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return + return; } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/frame/support/test/tests/decl_storage_ui.rs b/frame/support/test/tests/decl_storage_ui.rs index 34dfea8601ab9..40487273ad5ae 100644 --- a/frame/support/test/tests/decl_storage_ui.rs +++ b/frame/support/test/tests/decl_storage_ui.rs @@ -21,7 +21,7 @@ fn decl_storage_ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return + return; } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/frame/support/test/tests/derive_no_bound_ui.rs b/frame/support/test/tests/derive_no_bound_ui.rs index d714e1113625a..d6eda198c9a9b 100644 --- a/frame/support/test/tests/derive_no_bound_ui.rs +++ b/frame/support/test/tests/derive_no_bound_ui.rs @@ -21,7 +21,7 @@ fn derive_no_bound_ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return + return; } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 0fd32dad2242a..5a29b70e134a1 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -407,7 +407,7 @@ pub mod pallet { let _ = T::AccountId::from(SomeType1); // Test for where clause let _ = T::AccountId::from(SomeType5); // Test for where clause if matches!(call, Call::foo_storage_layer { .. 
}) { - return Ok(ValidTransaction::default()) + return Ok(ValidTransaction::default()); } Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) } @@ -1552,9 +1552,9 @@ fn metadata() { }, ]; - let empty_doc = pallets[0].event.as_ref().unwrap().ty.type_info().docs().is_empty() && - pallets[0].error.as_ref().unwrap().ty.type_info().docs().is_empty() && - pallets[0].calls.as_ref().unwrap().ty.type_info().docs().is_empty(); + let empty_doc = pallets[0].event.as_ref().unwrap().ty.type_info().docs().is_empty() + && pallets[0].error.as_ref().unwrap().ty.type_info().docs().is_empty() + && pallets[0].calls.as_ref().unwrap().ty.type_info().docs().is_empty(); if cfg!(feature = "no-metadata-docs") { assert!(empty_doc) diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index 398137d644ee4..209abac8a3368 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -288,8 +288,9 @@ mod test { fn metadata() { let metadata = Runtime::metadata(); let (pallets, types) = match metadata.1 { - frame_support::metadata::RuntimeMetadata::V14(metadata) => - (metadata.pallets, metadata.types), + frame_support::metadata::RuntimeMetadata::V14(metadata) => { + (metadata.pallets, metadata.types) + }, _ => unreachable!(), }; diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index e8b5fe9fa33d4..762a2e729f1e3 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -292,8 +292,9 @@ mod test { fn metadata() { let metadata = Runtime::metadata(); let (pallets, types) = match metadata.1 { - frame_support::metadata::RuntimeMetadata::V14(metadata) => - (metadata.pallets, metadata.types), + frame_support::metadata::RuntimeMetadata::V14(metadata) => { + (metadata.pallets, metadata.types) + }, _ => unreachable!(), }; 
diff --git a/frame/support/test/tests/pallet_ui.rs b/frame/support/test/tests/pallet_ui.rs index 2db1d3cb0543a..428341f2262b7 100644 --- a/frame/support/test/tests/pallet_ui.rs +++ b/frame/support/test/tests/pallet_ui.rs @@ -21,7 +21,7 @@ fn pallet_ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return + return; } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/frame/support/test/tests/storage_alias_ui.rs b/frame/support/test/tests/storage_alias_ui.rs index d45d071578dab..7243f7ea0206e 100644 --- a/frame/support/test/tests/storage_alias_ui.rs +++ b/frame/support/test/tests/storage_alias_ui.rs @@ -21,7 +21,7 @@ fn storage_alias_ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return + return; } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/frame/system/src/extensions/check_non_zero_sender.rs b/frame/system/src/extensions/check_non_zero_sender.rs index 036f70c2fdd48..36e85202d68ea 100644 --- a/frame/system/src/extensions/check_non_zero_sender.rs +++ b/frame/system/src/extensions/check_non_zero_sender.rs @@ -83,7 +83,7 @@ where _len: usize, ) -> TransactionValidity { if who.using_encoded(|d| d.iter().all(|x| *x == 0)) { - return Err(TransactionValidityError::Invalid(InvalidTransaction::BadSigner)) + return Err(TransactionValidityError::Invalid(InvalidTransaction::BadSigner)); } Ok(ValidTransaction::default()) } diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index 1616a2d8a119e..601c053be5a24 100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -86,7 +86,7 @@ where } else { InvalidTransaction::Future } - .into()) + .into()); } account.nonce += T::Index::one(); crate::Account::::insert(who, account); @@ -103,7 +103,7 @@ where // check index let account = 
crate::Account::::get(who); if self.0 < account.nonce { - return InvalidTransaction::Stale.into() + return InvalidTransaction::Stale.into(); } let provides = vec![Encode::encode(&(who, self.0))]; diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 5c3b80f59bfa8..b0c39e63e01ff 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -50,8 +50,9 @@ where ) -> Result<(), TransactionValidityError> { let max = T::BlockWeights::get().get(info.class).max_extrinsic; match max { - Some(max) if info.weight.any_gt(max) => - Err(InvalidTransaction::ExhaustsResources.into()), + Some(max) if info.weight.any_gt(max) => { + Err(InvalidTransaction::ExhaustsResources.into()) + }, _ => Ok(()), } } @@ -146,8 +147,9 @@ where // Check if we don't exceed per-class allowance match limit_per_class.max_total { - Some(max) if per_class.any_gt(max) => - return Err(InvalidTransaction::ExhaustsResources.into()), + Some(max) if per_class.any_gt(max) => { + return Err(InvalidTransaction::ExhaustsResources.into()) + }, // There is no `max_total` limit (`None`), // or we are below the limit. _ => {}, @@ -158,8 +160,9 @@ where if all_weight.total().any_gt(maximum_weight.max_block) { match limit_per_class.reserved { // We are over the limit in reserved pool. - Some(reserved) if per_class.any_gt(reserved) => - return Err(InvalidTransaction::ExhaustsResources.into()), + Some(reserved) if per_class.any_gt(reserved) => { + return Err(InvalidTransaction::ExhaustsResources.into()) + }, // There is either no limit in reserved pool (`None`), // or we are below the limit. 
_ => {}, @@ -191,7 +194,7 @@ where len: usize, ) -> Result<(), TransactionValidityError> { if info.class == DispatchClass::Mandatory { - return Err(InvalidTransaction::MandatoryDispatch.into()) + return Err(InvalidTransaction::MandatoryDispatch.into()); } Self::do_pre_dispatch(info, len) } @@ -204,7 +207,7 @@ where len: usize, ) -> TransactionValidity { if info.class == DispatchClass::Mandatory { - return Err(InvalidTransaction::MandatoryDispatch.into()) + return Err(InvalidTransaction::MandatoryDispatch.into()); } Self::do_validate(info, len) } @@ -237,7 +240,7 @@ where // extrinsics that result in error. if let (DispatchClass::Mandatory, Err(e)) = (info.class, result) { log::error!(target: "runtime::system", "Bad mandatory: {:?}", e); - return Err(InvalidTransaction::BadMandatory.into()) + return Err(InvalidTransaction::BadMandatory.into()); } let unspent = post_info.calc_unspent(info); @@ -321,8 +324,8 @@ mod tests { fn normal_extrinsic_limited_by_maximum_extrinsic_weight() { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + - Weight::from_ref_time(1), + weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + + Weight::from_ref_time(1), class: DispatchClass::Normal, ..Default::default() }; @@ -612,9 +615,9 @@ mod tests { let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); assert_eq!( BlockWeight::::get().total(), - info.weight + - Weight::from_ref_time(128) + - block_weights().get(DispatchClass::Normal).base_extrinsic, + info.weight + + Weight::from_ref_time(128) + + block_weights().get(DispatchClass::Normal).base_extrinsic, ); assert_ok!(CheckWeight::::post_dispatch( @@ -626,9 +629,9 @@ mod tests { )); assert_eq!( BlockWeight::::get().total(), - info.weight + - Weight::from_ref_time(128) + - block_weights().get(DispatchClass::Normal).base_extrinsic, + info.weight + + Weight::from_ref_time(128) + + 
block_weights().get(DispatchClass::Normal).base_extrinsic, ); }) } diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 477ebb97fbd95..4b0607baff61a 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1219,7 +1219,7 @@ impl Pallet { let block_number = Self::block_number(); // Don't populate events on genesis. if block_number.is_zero() { - return + return; } let phase = ExecutionPhase::::get().unwrap_or_default(); @@ -1561,11 +1561,11 @@ impl Pallet { .ok_or(Error::::FailedToExtractRuntimeVersion)?; if new_version.spec_name != current_version.spec_name { - return Err(Error::::InvalidSpecName.into()) + return Err(Error::::InvalidSpecName.into()); } if new_version.spec_version <= current_version.spec_version { - return Err(Error::::SpecVersionNeedsToIncrease.into()) + return Err(Error::::SpecVersionNeedsToIncrease.into()); } Ok(()) @@ -1650,7 +1650,7 @@ impl StoredMap for Pallet { }, } } else if !was_providing && !is_providing { - return Ok(result) + return Ok(result); } Account::::mutate(k, |a| a.data = some_data.unwrap_or_default()); Ok(result) diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index 99a4c1541d30f..14bf4b2203373 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -207,7 +207,7 @@ impl> Signer, I: 'static>(length: u32) -> (T::AccountId, Vec, T::AccountId) { let caller = whitelisted_caller(); - let value = T::TipReportDepositBase::get() + - T::DataDepositPerByte::get() * length.into() + - T::Currency::minimum_balance(); + let value = T::TipReportDepositBase::get() + + T::DataDepositPerByte::get() * length.into() + + T::Currency::minimum_balance(); let _ = T::Currency::make_free_balance_be(&caller, value); let reason = vec![0; length as usize]; let awesome_person = account("awesome", 0, SEED); diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs index 9313a26e52e00..74233e1dec5e1 100644 --- a/frame/tips/src/lib.rs +++ b/frame/tips/src/lib.rs @@ -254,8 +254,8 @@ 
pub mod pallet { let hash = T::Hashing::hash_of(&(&reason_hash, &who)); ensure!(!Tips::::contains_key(&hash), Error::::AlreadyKnown); - let deposit = T::TipReportDepositBase::get() + - T::DataDepositPerByte::get() * (reason.len() as u32).into(); + let deposit = T::TipReportDepositBase::get() + + T::DataDepositPerByte::get() * (reason.len() as u32).into(); T::Currency::reserve(&finder, deposit)?; Reasons::::insert(&reason_hash, &reason); @@ -510,9 +510,9 @@ impl, I: 'static> Pallet { Some(m) => { member = members_iter.next(); if m < a { - continue + continue; } else { - break true + break true; } }, } diff --git a/frame/tips/src/migrations/v4.rs b/frame/tips/src/migrations/v4.rs index 5e10fa7dd2c6d..c0aacf8417405 100644 --- a/frame/tips/src/migrations/v4.rs +++ b/frame/tips/src/migrations/v4.rs @@ -49,7 +49,7 @@ pub fn migrate::on_chain_storage_version(); @@ -109,7 +109,7 @@ pub fn pre_migrate< log_migration("pre-migration", storage_prefix_reasons, old_pallet_name, new_pallet_name); if new_pallet_name == old_pallet_name { - return + return; } let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); @@ -148,7 +148,7 @@ pub fn post_migrate< log_migration("post-migration", storage_prefix_reasons, old_pallet_name, new_pallet_name); if new_pallet_name == old_pallet_name { - return + return; } // Assert that no `Tips` and `Reasons` storages remains at the old prefix. 
diff --git a/frame/transaction-payment/asset-tx-payment/src/payment.rs b/frame/transaction-payment/asset-tx-payment/src/payment.rs index 80ff4e40dcffa..f7905cd6fcdd6 100644 --- a/frame/transaction-payment/asset-tx-payment/src/payment.rs +++ b/frame/transaction-payment/asset-tx-payment/src/payment.rs @@ -130,7 +130,7 @@ where let can_withdraw = >::can_withdraw(asset_id, who, converted_fee); if !matches!(can_withdraw, WithdrawConsequence::Success) { - return Err(InvalidTransaction::Payment.into()) + return Err(InvalidTransaction::Payment.into()); } >::withdraw(asset_id, who, converted_fee) .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment)) diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index ce747fa6bd85c..d271f585900d7 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -394,15 +394,15 @@ pub mod pallet { // at most be maximum block weight. Make sure that this can fit in a multiplier without // loss. assert!( - ::max_value() >= - Multiplier::checked_from_integer::( + ::max_value() + >= Multiplier::checked_from_integer::( T::BlockWeights::get().max_block.ref_time().try_into().unwrap() ) .unwrap(), ); - let target = T::FeeMultiplierUpdate::target() * - T::BlockWeights::get().get(DispatchClass::Normal).max_total.expect( + let target = T::FeeMultiplierUpdate::target() + * T::BlockWeights::get().get(DispatchClass::Normal).max_total.expect( "Setting `max_total` for `Normal` dispatch class is not compatible with \ `transaction-payment` pallet.", ); @@ -411,7 +411,7 @@ pub mod pallet { if addition == Weight::zero() { // this is most likely because in a test setup we set everything to () // or to `ConstFeeMultiplier`. - return + return; } #[cfg(any(feature = "std", test))] @@ -1178,8 +1178,9 @@ mod tests { // fee will be proportional to what is the actual maximum weight in the runtime. 
assert_eq!( Balances::free_balance(&1), - (10000 - - ::BlockWeights::get().max_block.ref_time()) as u64 + (10000 + - ::BlockWeights::get().max_block.ref_time()) + as u64 ); }); } diff --git a/frame/transaction-payment/src/payment.rs b/frame/transaction-payment/src/payment.rs index ebc9c5c5afd62..97b92d4b9e34f 100644 --- a/frame/transaction-payment/src/payment.rs +++ b/frame/transaction-payment/src/payment.rs @@ -98,7 +98,7 @@ where tip: Self::Balance, ) -> Result { if fee.is_zero() { - return Ok(None) + return Ok(None); } let withdraw_reason = if tip.is_zero() { diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index 07144c5617113..f02cae80e9ad8 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -211,7 +211,7 @@ pub mod pallet { let mut index = 0; >::mutate(|transactions| { if transactions.len() + 1 > T::MaxBlockTransactions::get() as usize { - return Err(Error::::TooManyTransactions) + return Err(Error::::TooManyTransactions); } let total_chunks = transactions.last().map_or(0, |t| t.block_chunks) + chunk_count; index = transactions.len() as u32; @@ -255,7 +255,7 @@ pub mod pallet { let mut index = 0; >::mutate(|transactions| { if transactions.len() + 1 > T::MaxBlockTransactions::get() as usize { - return Err(Error::::TooManyTransactions) + return Err(Error::::TooManyTransactions); } let chunks = num_chunks(info.size); let total_chunks = transactions.last().map_or(0, |t| t.block_chunks) + chunks; diff --git a/frame/uniques/src/impl_nonfungibles.rs b/frame/uniques/src/impl_nonfungibles.rs index 75d193ad19605..f2d44a7e88e21 100644 --- a/frame/uniques/src/impl_nonfungibles.rs +++ b/frame/uniques/src/impl_nonfungibles.rs @@ -137,7 +137,7 @@ impl, I: 'static> Mutate<::AccountId> for Pallet Self::do_burn(*collection, *item, |_, d| { if let Some(check_owner) = maybe_check_owner { if &d.owner != check_owner { - return Err(Error::::NoPermission.into()) + return 
Err(Error::::NoPermission.into()); } } Ok(()) diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 185d8fc0c8edd..0bc1c8bf22b86 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -687,10 +687,10 @@ pub mod pallet { if T::Currency::reserve(&collection_details.owner, deposit - old).is_err() { // NOTE: No alterations made to collection_details in this iteration so far, // so this is OK to do. - continue + continue; } } else { - continue + continue; } collection_details.total_deposit.saturating_accrue(deposit); collection_details.total_deposit.saturating_reduce(old); @@ -851,7 +851,7 @@ pub mod pallet { let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; ensure!(origin == details.owner, Error::::NoPermission); if details.owner == owner { - return Ok(()) + return Ok(()); } // Move the deposit to the new owner. diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 41710be930b90..769c69475b9f2 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -132,9 +132,9 @@ pub mod pallet { /// The limit on the number of batched calls. fn batched_calls_limit() -> u32 { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; - let call_size = ((sp_std::mem::size_of::<::RuntimeCall>() as u32 + - CALL_ALIGN - 1) / CALL_ALIGN) * - CALL_ALIGN; + let call_size = ((sp_std::mem::size_of::<::RuntimeCall>() as u32 + + CALL_ALIGN - 1) + / CALL_ALIGN) * CALL_ALIGN; // The margin to take into account vec doubling capacity. let margin_factor = 3; @@ -205,7 +205,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { - return Err(BadOrigin.into()) + return Err(BadOrigin.into()); } let is_root = ensure_root(origin.clone()).is_ok(); @@ -232,7 +232,7 @@ pub mod pallet { // Take the weight of this function itself into account. 
let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. - return Ok(Some(base_weight + weight).into()) + return Ok(Some(base_weight + weight).into()); } Self::deposit_event(Event::ItemCompleted); } @@ -326,7 +326,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { - return Err(BadOrigin.into()) + return Err(BadOrigin.into()); } let is_root = ensure_root(origin.clone()).is_ok(); @@ -438,7 +438,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { - return Err(BadOrigin.into()) + return Err(BadOrigin.into()); } let is_root = ensure_root(origin.clone()).is_ok(); diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index a92f94baf6cf9..bf13793a3b43a 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -428,7 +428,7 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; if schedule1_index == schedule2_index { - return Ok(()) + return Ok(()); }; let schedule1_index = schedule1_index as usize; let schedule2_index = schedule2_index as usize; @@ -506,7 +506,7 @@ impl Pallet { // Validate user inputs. 
ensure!(schedule.locked() >= T::MinVestedTransfer::get(), Error::::AmountLow); if !schedule.is_valid() { - return Err(Error::::InvalidScheduleParams.into()) + return Err(Error::::InvalidScheduleParams.into()); }; let target = T::Lookup::lookup(target)?; let source = T::Lookup::lookup(source)?; @@ -654,8 +654,8 @@ impl Pallet { }; debug_assert!( - locked_now > Zero::zero() && schedules.len() > 0 || - locked_now == Zero::zero() && schedules.len() == 0 + locked_now > Zero::zero() && schedules.len() > 0 + || locked_now == Zero::zero() && schedules.len() == 0 ); Ok((schedules, locked_now)) @@ -701,13 +701,13 @@ where starting_block: T::BlockNumber, ) -> DispatchResult { if locked.is_zero() { - return Ok(()) + return Ok(()); } let vesting_schedule = VestingInfo::new(locked, per_block, starting_block); // Check for `per_block` or `locked` of 0. if !vesting_schedule.is_valid() { - return Err(Error::::InvalidScheduleParams.into()) + return Err(Error::::InvalidScheduleParams.into()); }; let mut schedules = Self::vesting(who).unwrap_or_default(); @@ -735,7 +735,7 @@ where ) -> DispatchResult { // Check for `per_block` or `locked` of 0. if !VestingInfo::new(locked, per_block, starting_block).is_valid() { - return Err(Error::::InvalidScheduleParams.into()) + return Err(Error::::InvalidScheduleParams.into()); } ensure!( diff --git a/frame/vesting/src/tests.rs b/frame/vesting/src/tests.rs index cbc2e09c83199..41d01a1c3079a 100644 --- a/frame/vesting/src/tests.rs +++ b/frame/vesting/src/tests.rs @@ -1131,16 +1131,16 @@ fn vested_transfer_less_than_existential_deposit_fails() { ExtBuilder::default().existential_deposit(4 * ED).build().execute_with(|| { // MinVestedTransfer is less the ED. 
assert!( - ::Currency::minimum_balance() > - ::MinVestedTransfer::get() + ::Currency::minimum_balance() + > ::MinVestedTransfer::get() ); let sched = VestingInfo::new(::MinVestedTransfer::get() as u64, 1u64, 10u64); // The new account balance with the schedule's locked amount would be less than ED. assert!( - Balances::free_balance(&99) + sched.locked() < - ::Currency::minimum_balance() + Balances::free_balance(&99) + sched.locked() + < ::Currency::minimum_balance() ); // vested_transfer fails. diff --git a/frame/vesting/src/vesting_info.rs b/frame/vesting/src/vesting_info.rs index 9069b69482769..2a05f25be491a 100644 --- a/frame/vesting/src/vesting_info.rs +++ b/frame/vesting/src/vesting_info.rs @@ -99,8 +99,8 @@ where // the block after starting. One::one() } else { - self.locked / self.per_block() + - if (self.locked % self.per_block()).is_zero() { + self.locked / self.per_block() + + if (self.locked % self.per_block()).is_zero() { Zero::zero() } else { // `per_block` does not perfectly divide `locked`, so we need an extra block to diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 8d46047dbda5a..c71e7c0df584e 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -190,7 +190,7 @@ fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { ); match meta { - Meta::List(list) => + Meta::List(list) => { if list.nested.len() > 2 && list.nested.is_empty() { err } else { @@ -206,7 +206,8 @@ fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { }; Ok((old_name, version)) - }, + } + }, _ => err, } } @@ -679,7 +680,7 @@ impl CheckTraitDecl { Ok(r) => r, Err(e) => { self.errors.push(e); - return + return; }, }; @@ -721,12 +722,13 @@ impl<'ast> Visit<'ast> for CheckTraitDecl { fn visit_generic_param(&mut self, input: &'ast GenericParam) { match input { - GenericParam::Type(ty) if ty.ident == 
BLOCK_GENERIC_IDENT => + GenericParam::Type(ty) if ty.ident == BLOCK_GENERIC_IDENT => { self.errors.push(Error::new( input.span(), "`Block: BlockT` generic parameter will be added automatically by the \ `decl_runtime_apis!` macro!", - )), + )) + }, _ => {}, } diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index c3f4e36655d22..7656c710fb950 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -366,7 +366,7 @@ fn extend_with_api_version(mut trait_: Path, version: Option) -> Path { v } else { // nothing to do - return trait_ + return trait_; }; let trait_name = &mut trait_ @@ -598,7 +598,7 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { "Two traits with the same name detected! \ The trait name is used to generate its ID. \ Please rename one trait at the declaration!", - )) + )); } let id: Path = parse_quote!( #path ID ); @@ -681,7 +681,7 @@ fn extract_api_version(attrs: &Vec, span: Span) -> Result Each runtime API can have only one version.", API_VERSION_ATTRIBUTE ), - )) + )); } // Parse the runtime version if there exists one. 
diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index e43a302e18923..27c91857d0f93 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -225,7 +225,7 @@ fn get_at_param_name( takes at least one argument, the `BlockId`.", ADVANCED_ATTRIBUTE, ), - )) + )); } // `param_names` and `param_types` have the same length, so if `param_names` is not empty @@ -236,7 +236,7 @@ fn get_at_param_name( return Err(Error::new( span, "`BlockId` needs to be taken by reference and not by value!", - )) + )); } let name = param_names.remove(0); @@ -415,7 +415,7 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result + Some(self_ty) => { if self_ty == impl_.self_ty { Some(self_ty) } else { @@ -426,13 +426,14 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result Some(impl_.self_ty.clone()), }; global_block_type = match global_block_type.take() { - Some(global_block_type) => + Some(global_block_type) => { if global_block_type == *block_type { Some(global_block_type) } else { @@ -446,8 +447,9 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result Some(block_type.clone()), }; diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 2ccd050cfb151..737015cc7e662 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -151,12 +151,14 @@ pub fn extract_parameter_names_types_and_borrows( generate_unique_pattern((*arg.pat).clone(), &mut generated_pattern_counter); result.push((name, ty, borrow)); }, - FnArg::Receiver(_) if matches!(allow_self, AllowSelfRefInParameters::No) => - return Err(Error::new(input.span(), "`self` parameter not supported!")), - FnArg::Receiver(recv) => + FnArg::Receiver(_) if matches!(allow_self, AllowSelfRefInParameters::No) => { + return Err(Error::new(input.span(), "`self` parameter not supported!")) + }, + 
FnArg::Receiver(recv) => { if recv.mutability.is_some() || recv.reference.is_none() { - return Err(Error::new(recv.span(), "Only `&self` is supported!")) - }, + return Err(Error::new(recv.span(), "Only `&self` is supported!")); + } + }, } } @@ -222,8 +224,9 @@ pub fn extract_block_type_from_trait_path(trait_: &Path) -> Result<&TypePath> { let span = trait_.segments.last().as_ref().unwrap().span(); Err(Error::new(span, "Missing `Block` generic parameter.")) }, - PathArguments::Parenthesized(_) => - Err(Error::new(generics.arguments.span(), "Unexpected parentheses in path!")), + PathArguments::Parenthesized(_) => { + Err(Error::new(generics.arguments.span(), "Unexpected parentheses in path!")) + }, } } diff --git a/primitives/api/test/tests/trybuild.rs b/primitives/api/test/tests/trybuild.rs index 13af1ded7dc6b..7ef35f6452555 100644 --- a/primitives/api/test/tests/trybuild.rs +++ b/primitives/api/test/tests/trybuild.rs @@ -22,7 +22,7 @@ use std::env; fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if env::var("RUN_UI_TESTS").is_err() { - return + return; } // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
diff --git a/primitives/arithmetic/fuzzer/src/biguint.rs b/primitives/arithmetic/fuzzer/src/biguint.rs index f49743a4b8a69..61fd3fff9d5db 100644 --- a/primitives/arithmetic/fuzzer/src/biguint.rs +++ b/primitives/arithmetic/fuzzer/src/biguint.rs @@ -106,7 +106,7 @@ fn main() { if check_digit_lengths(&u, &v, 4) { let (ue, ve) = (ue.unwrap(), ve.unwrap()); if ve == 0 { - return + return; } let (q, r) = (ue / ve, ue % ve); if let Some((qq, rr)) = u.clone().div(&v, true) { diff --git a/primitives/arithmetic/fuzzer/src/fixed_point.rs b/primitives/arithmetic/fuzzer/src/fixed_point.rs index c1b93f8c63a11..de8f046d8bb26 100644 --- a/primitives/arithmetic/fuzzer/src/fixed_point.rs +++ b/primitives/arithmetic/fuzzer/src/fixed_point.rs @@ -76,8 +76,8 @@ fn main() { let a = FixedI64::saturating_from_rational(2, 5); let b = a.saturating_mul_acc_int(x); let xx = FixedI64::saturating_from_integer(x); - let d = a.saturating_mul(xx).saturating_add(xx).into_inner() as i128 / - FixedI64::accuracy() as i128; + let d = a.saturating_mul(xx).saturating_add(xx).into_inner() as i128 + / FixedI64::accuracy() as i128; assert_eq!(b, d); }); } diff --git a/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs b/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs index 474b2d363eccd..c5939fd7601c1 100644 --- a/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs +++ b/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs @@ -62,7 +62,7 @@ fn main() { fn mul_div(a: u128, b: u128, c: u128) -> u128 { use primitive_types::U256; if a.is_zero() { - return Zero::zero() + return Zero::zero(); } let c = c.max(1); diff --git a/primitives/arithmetic/fuzzer/src/normalize.rs b/primitives/arithmetic/fuzzer/src/normalize.rs index 3d2d6eb9acfcc..afd92b05517c5 100644 --- a/primitives/arithmetic/fuzzer/src/normalize.rs +++ b/primitives/arithmetic/fuzzer/src/normalize.rs @@ -36,7 +36,7 @@ fn main() { fuzz!(|data: (Vec, Ty)| { let (data, norm) = 
data; if data.is_empty() { - return + return; } let pre_sum: u128 = data.iter().map(|x| *x as u128).sum(); diff --git a/primitives/arithmetic/src/biguint.rs b/primitives/arithmetic/src/biguint.rs index 33f0960ee378c..ded04f09ea7a1 100644 --- a/primitives/arithmetic/src/biguint.rs +++ b/primitives/arithmetic/src/biguint.rs @@ -159,7 +159,7 @@ impl BigUint { // has the ability to cause this. There is nothing to do if the number already has 1 // limb only. call it a day and return. if self.len().is_zero() { - return + return; } let index = self.digits.iter().position(|&elem| elem != 0).unwrap_or(self.len() - 1); @@ -173,7 +173,7 @@ impl BigUint { pub fn lpad(&mut self, size: usize) { let n = self.len(); if n >= size { - return + return; } let pad = size - n; let mut new_digits = (0..pad).map(|_| 0).collect::>(); @@ -266,15 +266,15 @@ impl BigUint { if self.get(j) == 0 { // Note: `with_capacity` allocates with 0. Explicitly set j + m to zero if // otherwise. - continue + continue; } let mut k = 0; for i in 0..m { // PROOF: (B−1) × (B−1) + (B−1) + (B−1) = B^2 −1 < B^2. addition is safe. - let t = mul_single(self.get(j), other.get(i)) + - Double::from(w.get(i + j)) + - Double::from(k); + let t = mul_single(self.get(j), other.get(i)) + + Double::from(w.get(i + j)) + + Double::from(k); w.set(i + j, (t % B) as Single); // PROOF: (B^2 - 1) / B < B. conversion is safe. k = (t / B) as Single; @@ -318,7 +318,7 @@ impl BigUint { /// Taken from "The Art of Computer Programming" by D.E. Knuth, vol 2, chapter 4. 
pub fn div(self, other: &Self, rem: bool) -> Option<(Self, Self)> { if other.len() <= 1 || other.msb() == 0 || self.msb() == 0 || self.len() <= other.len() { - return None + return None; } let n = other.len(); let m = self.len() - n; @@ -378,7 +378,7 @@ impl BigUint { test(); while (*rhat.borrow() as Double) < B { if !test() { - break + break; } } diff --git a/primitives/arithmetic/src/fixed_point.rs b/primitives/arithmetic/src/fixed_point.rs index bf3c93cdad605..5f73d64591f4e 100644 --- a/primitives/arithmetic/src/fixed_point.rs +++ b/primitives/arithmetic/src/fixed_point.rs @@ -144,7 +144,7 @@ pub trait FixedPointNumber: d: D, ) -> Option { if d == D::zero() { - return None + return None; } let n: I129 = n.into(); @@ -529,10 +529,10 @@ macro_rules! implement_fixed { /// `None` is returned. pub const fn try_sqrt(self) -> Option { if self.0 == 0 { - return Some(Self(0)) + return Some(Self(0)); } if self.0 < 1 { - return None + return None; } let v = self.0 as u128; @@ -623,7 +623,7 @@ macro_rules! implement_fixed { } else { let unsigned_inner = n.value as $inner_type; if unsigned_inner as u128 != n.value || (unsigned_inner > 0) != (n.value > 0) { - return None + return None; }; if n.negative { match unsigned_inner.checked_neg() { @@ -717,7 +717,7 @@ macro_rules! implement_fixed { rounding: SignedRounding, ) -> Option { if other.0 == 0 { - return None + return None; } let lhs = self.into_i129(); @@ -751,7 +751,7 @@ macro_rules! implement_fixed { fn saturating_pow(self, exp: usize) -> Self { if exp == 0 { - return Self::saturating_from_integer(1) + return Self::saturating_from_integer(1); } let exp = exp as u32; @@ -829,7 +829,7 @@ macro_rules! 
implement_fixed { impl CheckedDiv for $name { fn checked_div(&self, other: &Self) -> Option { if other.0 == 0 { - return None + return None; } let lhs: I129 = self.0.into(); diff --git a/primitives/arithmetic/src/helpers_128bit.rs b/primitives/arithmetic/src/helpers_128bit.rs index 7938c31d1eaa6..b100aa09a55aa 100644 --- a/primitives/arithmetic/src/helpers_128bit.rs +++ b/primitives/arithmetic/src/helpers_128bit.rs @@ -155,7 +155,7 @@ mod double128 { pub const fn div(mut self, rhs: u128) -> (Self, u128) { if rhs == 1 { - return (self, 0) + return (self, 0); } // (self === a; rhs === b) @@ -192,7 +192,7 @@ pub const fn multiply_by_rational_with_rounding( ) -> Option { use double128::Double128; if c == 0 { - return None + return None; } let (result, remainder) = Double128::product_of(a, b).div(c); let mut result: u128 = match result.try_into_u128() { @@ -217,7 +217,7 @@ pub const fn multiply_by_rational_with_rounding( pub const fn sqrt(mut n: u128) -> u128 { // Modified from https://github.com/derekdreery/integer-sqrt-rs (Apache/MIT). if n == 0 { - return 0 + return 0; } // Compute bit, the largest power of 4 <= n diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index 244242c0f7580..50f2af2c1b175 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -68,7 +68,7 @@ where fn tcmp(&self, other: &T, threshold: T) -> Ordering { // early exit. if threshold.is_zero() { - return self.cmp(other) + return self.cmp(other); } let upper_bound = other.saturating_add(threshold); @@ -173,12 +173,12 @@ where // Nothing to do here. 
if count.is_zero() { - return Ok(Vec::::new()) + return Ok(Vec::::new()); } let diff = targeted_sum.max(sum) - targeted_sum.min(sum); if diff.is_zero() { - return Ok(input.to_vec()) + return Ok(input.to_vec()); } let needs_bump = targeted_sum > sum; diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index 2932a742063db..c24bbd300d175 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -92,7 +92,7 @@ pub trait PerThing: /// Return the next lower value to `self` or `self` if it is already zero. fn less_epsilon(self) -> Self { if self.is_zero() { - return self + return self; } Self::from_parts(self.deconstruct() - One::one()) } @@ -101,7 +101,7 @@ pub trait PerThing: /// zero. fn try_less_epsilon(self) -> Result { if self.is_zero() { - return Err(self) + return Err(self); } Ok(Self::from_parts(self.deconstruct() - One::one())) } @@ -109,7 +109,7 @@ pub trait PerThing: /// Return the next higher value to `self` or `self` if it is already one. fn plus_epsilon(self) -> Self { if self.is_one() { - return self + return self; } Self::from_parts(self.deconstruct() + One::one()) } @@ -118,7 +118,7 @@ pub trait PerThing: /// one. 
fn try_plus_epsilon(self) -> Result { if self.is_one() { - return Err(self) + return Err(self); } Ok(Self::from_parts(self.deconstruct() + One::one())) } @@ -464,10 +464,12 @@ impl Rounding { match (rounding, negative) { (Low, true) | (Major, _) | (High, false) => Up, (High, true) | (Minor, _) | (Low, false) => Down, - (NearestPrefMajor, _) | (NearestPrefHigh, false) | (NearestPrefLow, true) => - NearestPrefUp, - (NearestPrefMinor, _) | (NearestPrefLow, false) | (NearestPrefHigh, true) => - NearestPrefDown, + (NearestPrefMajor, _) | (NearestPrefHigh, false) | (NearestPrefLow, true) => { + NearestPrefUp + }, + (NearestPrefMinor, _) | (NearestPrefLow, false) | (NearestPrefHigh, true) => { + NearestPrefDown + }, } } } @@ -547,16 +549,18 @@ where rem_mul_div_inner += 1.into(); } }, - Rounding::NearestPrefDown => + Rounding::NearestPrefDown => { if rem_mul_upper % denom_upper > denom_upper / 2.into() { // `rem * numer / denom` is less than `numer`, so this will not overflow. rem_mul_div_inner += 1.into(); - }, - Rounding::NearestPrefUp => + } + }, + Rounding::NearestPrefUp => { if rem_mul_upper % denom_upper >= denom_upper / 2.into() + denom_upper % 2.into() { // `rem * numer / denom` is less than `numer`, so this will not overflow. rem_mul_div_inner += 1.into(); - }, + } + }, } rem_mul_div_inner.into() } diff --git a/primitives/arithmetic/src/rational.rs b/primitives/arithmetic/src/rational.rs index 54cabfc6214e8..55f57f1670469 100644 --- a/primitives/arithmetic/src/rational.rs +++ b/primitives/arithmetic/src/rational.rs @@ -166,7 +166,7 @@ impl Rational128 { pub fn lcm(&self, other: &Self) -> Option { // this should be tested better: two large numbers that are almost the same. 
if self.1 == other.1 { - return Some(self.1) + return Some(self.1); } let g = helpers_128bit::gcd(self.1, other.1); helpers_128bit::multiply_by_rational_with_rounding( @@ -300,7 +300,7 @@ mod tests { fn mul_div(a: u128, b: u128, c: u128) -> u128 { use primitive_types::U256; if a.is_zero() { - return Zero::zero() + return Zero::zero(); } let c = c.max(1); diff --git a/primitives/authorship/src/lib.rs b/primitives/authorship/src/lib.rs index 7ea19d9ea5ff5..bd996db7b3d01 100644 --- a/primitives/authorship/src/lib.rs +++ b/primitives/authorship/src/lib.rs @@ -91,7 +91,7 @@ impl sp_inherents::InherentDataProvider for InherentDataProvider mut error: &[u8], ) -> Option> { if *identifier != INHERENT_IDENTIFIER { - return None + return None; } let error = InherentError::decode(&mut error).ok()?; diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index dea3a7f285117..31cc4c46d4747 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -139,7 +139,7 @@ pub trait Backend: if let Some(max_number) = maybe_max_number { // target outside search range if target_header.number() > &max_number { - return Ok(None) + return Ok(None); } } @@ -160,12 +160,12 @@ pub trait Backend: // provided, we continue to search from all leaves below. if let Some(max_number) = maybe_max_number { if let Some(header) = self.hash(max_number)? { - return Ok(Some(header)) + return Ok(Some(header)); } } } else if info.finalized_number >= *target_header.number() { // header is on a dead fork. - return Ok(None) + return Ok(None); } self.leaves()? 
@@ -189,7 +189,7 @@ pub trait Backend: if current_header.number() <= &max_number { best_hash = current_header.hash(); - break + break; } current_hash = *current_header.parent_hash(); @@ -200,7 +200,7 @@ pub trait Backend: loop { // until we find target if current_hash == target_hash { - return Ok(Some(best_hash)) + return Ok(Some(best_hash)); } let current_header = self @@ -209,7 +209,7 @@ pub trait Backend: // stop search in this chain once we go below the target's block number if current_header.number() < target_header.number() { - break + break; } current_hash = *current_header.parent_hash(); diff --git a/primitives/blockchain/src/header_metadata.rs b/primitives/blockchain/src/header_metadata.rs index 87ac44459987e..f810aba529731 100644 --- a/primitives/blockchain/src/header_metadata.rs +++ b/primitives/blockchain/src/header_metadata.rs @@ -39,12 +39,12 @@ pub fn lowest_common_ancestor + ?Sized>( ) -> Result, T::Error> { let mut header_one = backend.header_metadata(id_one)?; if header_one.parent == id_two { - return Ok(HashAndNumber { hash: id_two, number: header_one.number - One::one() }) + return Ok(HashAndNumber { hash: id_two, number: header_one.number - One::one() }); } let mut header_two = backend.header_metadata(id_two)?; if header_two.parent == id_one { - return Ok(HashAndNumber { hash: id_one, number: header_one.number }) + return Ok(HashAndNumber { hash: id_one, number: header_one.number }); } let mut orig_header_one = header_one.clone(); @@ -58,7 +58,7 @@ pub fn lowest_common_ancestor + ?Sized>( if ancestor_one.number >= header_two.number { header_one = ancestor_one; } else { - break + break; } } @@ -68,7 +68,7 @@ pub fn lowest_common_ancestor + ?Sized>( if ancestor_two.number >= header_one.number { header_two = ancestor_two; } else { - break + break; } } diff --git a/primitives/consensus/babe/src/digests.rs b/primitives/consensus/babe/src/digests.rs index 1e4c820379d7a..47bbb41d61fbd 100644 --- a/primitives/consensus/babe/src/digests.rs +++ 
b/primitives/consensus/babe/src/digests.rs @@ -121,8 +121,9 @@ impl PreDigest { pub fn vrf(&self) -> Option<(&VRFOutput, &VRFProof)> { match self { PreDigest::Primary(primary) => Some((&primary.vrf_output, &primary.vrf_proof)), - PreDigest::SecondaryVRF(secondary) => - Some((&secondary.vrf_output, &secondary.vrf_proof)), + PreDigest::SecondaryVRF(secondary) => { + Some((&secondary.vrf_output, &secondary.vrf_proof)) + }, PreDigest::SecondaryPlain(_) => None, } } diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 621ab859b914f..71b94ea005802 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -279,7 +279,7 @@ where let pre_hash = header.hash(); if !offender.verify(&pre_hash.as_ref(), &seal) { - return None + return None; } Some(()) @@ -288,7 +288,7 @@ where let verify_proof = || { // we must have different headers for the equivocation to be valid if proof.first_header.hash() == proof.second_header.hash() { - return None + return None; } let first_pre_digest = find_pre_digest(&proof.first_header)?; @@ -296,15 +296,15 @@ where // both headers must be targetting the same slot and it must // be the same as the one in the proof. 
- if proof.slot != first_pre_digest.slot() || - first_pre_digest.slot() != second_pre_digest.slot() + if proof.slot != first_pre_digest.slot() + || first_pre_digest.slot() != second_pre_digest.slot() { - return None + return None; } // both headers must have been authored by the same authority if first_pre_digest.authority_index() != second_pre_digest.authority_index() { - return None + return None; } // we finally verify that the expected authority has signed both headers and diff --git a/primitives/consensus/vrf/src/schnorrkel.rs b/primitives/consensus/vrf/src/schnorrkel.rs index 8666de6c4bc0c..ab37524e41e50 100644 --- a/primitives/consensus/vrf/src/schnorrkel.rs +++ b/primitives/consensus/vrf/src/schnorrkel.rs @@ -163,24 +163,33 @@ fn convert_error(e: SignatureError) -> codec::Error { ScalarFormatError => "Signature error: `ScalarFormatError`".into(), NotMarkedSchnorrkel => "Signature error: `NotMarkedSchnorrkel`".into(), BytesLengthError { .. } => "Signature error: `BytesLengthError`".into(), - MuSigAbsent { musig_stage: Commitment } => - "Signature error: `MuSigAbsent` at stage `Commitment`".into(), - MuSigAbsent { musig_stage: Reveal } => - "Signature error: `MuSigAbsent` at stage `Reveal`".into(), - MuSigAbsent { musig_stage: Cosignature } => - "Signature error: `MuSigAbsent` at stage `Commitment`".into(), - MuSigInconsistent { musig_stage: Commitment, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Commitment` on duplicate".into(), - MuSigInconsistent { musig_stage: Commitment, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Commitment` on not duplicate".into(), - MuSigInconsistent { musig_stage: Reveal, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Reveal` on duplicate".into(), - MuSigInconsistent { musig_stage: Reveal, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Reveal` on not duplicate".into(), - MuSigInconsistent { musig_stage: Cosignature, duplicate: 
true } => - "Signature error: `MuSigInconsistent` at stage `Cosignature` on duplicate".into(), - MuSigInconsistent { musig_stage: Cosignature, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Cosignature` on not duplicate".into(), + MuSigAbsent { musig_stage: Commitment } => { + "Signature error: `MuSigAbsent` at stage `Commitment`".into() + }, + MuSigAbsent { musig_stage: Reveal } => { + "Signature error: `MuSigAbsent` at stage `Reveal`".into() + }, + MuSigAbsent { musig_stage: Cosignature } => { + "Signature error: `MuSigAbsent` at stage `Commitment`".into() + }, + MuSigInconsistent { musig_stage: Commitment, duplicate: true } => { + "Signature error: `MuSigInconsistent` at stage `Commitment` on duplicate".into() + }, + MuSigInconsistent { musig_stage: Commitment, duplicate: false } => { + "Signature error: `MuSigInconsistent` at stage `Commitment` on not duplicate".into() + }, + MuSigInconsistent { musig_stage: Reveal, duplicate: true } => { + "Signature error: `MuSigInconsistent` at stage `Reveal` on duplicate".into() + }, + MuSigInconsistent { musig_stage: Reveal, duplicate: false } => { + "Signature error: `MuSigInconsistent` at stage `Reveal` on not duplicate".into() + }, + MuSigInconsistent { musig_stage: Cosignature, duplicate: true } => { + "Signature error: `MuSigInconsistent` at stage `Cosignature` on duplicate".into() + }, + MuSigInconsistent { musig_stage: Cosignature, duplicate: false } => { + "Signature error: `MuSigInconsistent` at stage `Cosignature` on not duplicate".into() + }, } } diff --git a/primitives/core/hashing/proc-macro/src/impls.rs b/primitives/core/hashing/proc-macro/src/impls.rs index 3058cf019b143..42e35896aff97 100644 --- a/primitives/core/hashing/proc-macro/src/impls.rs +++ b/primitives/core/hashing/proc-macro/src/impls.rs @@ -48,20 +48,22 @@ impl Parse for InputBytes { syn::Expr::Lit(lit) => match &lit.lit { syn::Lit::Int(b) => bytes.push(b.base10_parse()?), syn::Lit::Byte(b) => bytes.push(b.value()), - _ 
=> + _ => { return Err(syn::Error::new( input.span(), "Expected array of u8 elements.".to_string(), - )), + )) + }, }, - _ => + _ => { return Err(syn::Error::new( input.span(), "Expected array of u8 elements.".to_string(), - )), + )) + }, } } - return Ok(InputBytes(bytes)) + return Ok(InputBytes(bytes)); }, Err(_e) => (), } diff --git a/primitives/core/src/bounded/bounded_btree_map.rs b/primitives/core/src/bounded/bounded_btree_map.rs index d2c148d6de9c5..a35db28577c05 100644 --- a/primitives/core/src/bounded/bounded_btree_map.rs +++ b/primitives/core/src/bounded/bounded_btree_map.rs @@ -41,7 +41,7 @@ where fn decode(input: &mut I) -> Result { let inner = BTreeMap::::decode(input)?; if inner.len() > S::get() as usize { - return Err("BoundedBTreeMap exceeds its limit".into()) + return Err("BoundedBTreeMap exceeds its limit".into()); } Ok(Self(inner, PhantomData)) } diff --git a/primitives/core/src/bounded/bounded_btree_set.rs b/primitives/core/src/bounded/bounded_btree_set.rs index 5feac6b7150f0..fc776dc8ee056 100644 --- a/primitives/core/src/bounded/bounded_btree_set.rs +++ b/primitives/core/src/bounded/bounded_btree_set.rs @@ -40,7 +40,7 @@ where fn decode(input: &mut I) -> Result { let inner = BTreeSet::::decode(input)?; if inner.len() > S::get() as usize { - return Err("BoundedBTreeSet exceeds its limit".into()) + return Err("BoundedBTreeSet exceeds its limit".into()); } Ok(Self(inner, PhantomData)) } diff --git a/primitives/core/src/bounded/bounded_vec.rs b/primitives/core/src/bounded/bounded_vec.rs index 2f39f3340ce50..96f7792f831c5 100644 --- a/primitives/core/src/bounded/bounded_vec.rs +++ b/primitives/core/src/bounded/bounded_vec.rs @@ -91,7 +91,7 @@ where while let Some(value) = seq.next_element()? 
{ values.push(value); if values.len() > max { - return Err(A::Error::custom("out of bounds")) + return Err(A::Error::custom("out of bounds")); } } @@ -310,7 +310,7 @@ impl> Decode for BoundedVec { fn decode(input: &mut I) -> Result { let inner = Vec::::decode(input)?; if inner.len() > S::get() as usize { - return Err("BoundedVec exceeds its limit".into()) + return Err("BoundedVec exceeds its limit".into()); } Ok(Self(inner, PhantomData)) } @@ -494,7 +494,7 @@ impl> BoundedVec { Ok(None) } else { if index == 0 { - return Err(()) + return Err(()); } sp_std::mem::swap(&mut self[0], &mut element); // `[0..index] cannot panic since self.len() >= index. @@ -517,11 +517,11 @@ impl> BoundedVec { pub fn force_insert_keep_left(&mut self, index: usize, element: T) -> Result, ()> { // Check against panics. if Self::bound() < index || self.len() < index || Self::bound() == 0 { - return Err(()) + return Err(()); } // Noop condition. if Self::bound() == index && self.len() <= Self::bound() { - return Err(()) + return Err(()); } let maybe_removed = if self.is_full() { // defensive-only: since we are at capacity, this is a noop. @@ -549,11 +549,11 @@ impl> BoundedVec { pub fn slide(&mut self, index: usize, insert_position: usize) -> bool { // Check against panics. if self.len() <= index || self.len() < insert_position || index == usize::MAX { - return false + return false; } // Noop conditions. if index == insert_position || index + 1 == insert_position { - return false + return false; } if insert_position < index && index < self.len() { // --- --- --- === === === === @@@ --- --- --- @@ -566,7 +566,7 @@ impl> BoundedVec { // --- --- --- @@@ === === === === --- --- --- // ^N^ self[insert_position..index + 1].rotate_right(1); - return true + return true; } else if insert_position > 0 && index + 1 < insert_position { // Note that the apparent asymmetry of these two branches is due to the // fact that the "new" position is the position to be inserted *before*. 
@@ -580,7 +580,7 @@ impl> BoundedVec { // --- --- --- === === === === @@@ --- --- --- // ^N^ self[index..insert_position].rotate_left(1); - return true + return true; } debug_assert!(false, "all noop conditions should have been covered above"); diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 06703acea7202..e5faf54cab92c 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -265,7 +265,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + ByteArray { let data = s.from_base58().map_err(|_| PublicError::BadBase58)?; if data.len() < 2 { - return Err(PublicError::BadLength) + return Err(PublicError::BadLength); } let (prefix_len, ident) = match data[0] { 0..=63 => (1, data[0] as u16), @@ -282,18 +282,18 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + ByteArray { _ => return Err(PublicError::InvalidPrefix), }; if data.len() != prefix_len + body_len + CHECKSUM_LEN { - return Err(PublicError::BadLength) + return Err(PublicError::BadLength); } let format = ident.into(); if !Self::format_is_allowed(format) { - return Err(PublicError::FormatNotAllowed) + return Err(PublicError::FormatNotAllowed); } let hash = ss58hash(&data[0..body_len + prefix_len]); let checksum = &hash[0..CHECKSUM_LEN]; if data[body_len + prefix_len..body_len + prefix_len + CHECKSUM_LEN] != *checksum { // Invalid checksum. 
- return Err(PublicError::InvalidChecksum) + return Err(PublicError::InvalidChecksum); } let result = Self::from_slice(&data[prefix_len..body_len + prefix_len]) @@ -1068,7 +1068,7 @@ impl<'a> TryFrom<&'a str> for KeyTypeId { fn try_from(x: &'a str) -> Result { let b = x.as_bytes(); if b.len() != 4 { - return Err(()) + return Err(()); } let mut res = KeyTypeId::default(); res.0.copy_from_slice(&b[0..4]); @@ -1227,14 +1227,16 @@ mod tests { password, path: path.into_iter().chain(path_iter).collect(), }, - TestPair::GeneratedFromPhrase { phrase, password } => - TestPair::Standard { phrase, password, path: path_iter.collect() }, - x => + TestPair::GeneratedFromPhrase { phrase, password } => { + TestPair::Standard { phrase, password, path: path_iter.collect() } + }, + x => { if path_iter.count() == 0 { x } else { - return Err(()) - }, + return Err(()); + } + }, }, None, )) diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index ca6b800625bc2..e3bd99116cbd1 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -142,7 +142,7 @@ impl TryFrom<&[u8]> for Public { fn try_from(data: &[u8]) -> Result { if data.len() != Self::LEN { - return Err(()) + return Err(()); } let mut r = [0u8; Self::LEN]; r.copy_from_slice(data); @@ -317,7 +317,7 @@ impl Signature { /// you are certain that the array actually is a signature. GIGO! 
pub fn from_slice(data: &[u8]) -> Option { if data.len() != 65 { - return None + return None; } let mut r = [0u8; 65]; r.copy_from_slice(data); diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index e85eb87c9fd83..2f5c2212eed36 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -114,7 +114,7 @@ impl TryFrom<&[u8]> for Public { fn try_from(data: &[u8]) -> Result { if data.len() != Self::LEN { - return Err(()) + return Err(()); } let mut r = [0u8; Self::LEN]; r.copy_from_slice(data); @@ -316,7 +316,7 @@ impl Signature { /// you are certain that the array actually is a signature. GIGO! pub fn from_slice(data: &[u8]) -> Option { if data.len() != 64 { - return None + return None; } let mut r = [0u8; 64]; r.copy_from_slice(data); diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index fda7604d5337f..77dab804c10e5 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -116,10 +116,11 @@ impl ExecutionContext { Importing | Syncing | BlockConstruction => offchain::Capabilities::empty(), // Enable keystore, transaction pool and Offchain DB reads by default for offchain // calls. 
- OffchainCall(None) => - offchain::Capabilities::KEYSTORE | - offchain::Capabilities::OFFCHAIN_DB_READ | - offchain::Capabilities::TRANSACTION_POOL, + OffchainCall(None) => { + offchain::Capabilities::KEYSTORE + | offchain::Capabilities::OFFCHAIN_DB_READ + | offchain::Capabilities::TRANSACTION_POOL + }, OffchainCall(Some((_, capabilities))) => *capabilities, } } diff --git a/primitives/core/src/offchain/storage.rs b/primitives/core/src/offchain/storage.rs index cf2c93641f245..6e6238c651c7b 100644 --- a/primitives/core/src/offchain/storage.rs +++ b/primitives/core/src/offchain/storage.rs @@ -73,13 +73,14 @@ impl OffchainStorage for InMemOffchainStorage { let key = prefix.iter().chain(key).cloned().collect(); match self.storage.entry(key) { - Entry::Vacant(entry) => + Entry::Vacant(entry) => { if old_value.is_none() { entry.insert(new_value.to_vec()); true } else { false - }, + } + }, Entry::Occupied(ref mut entry) if Some(entry.get().as_slice()) == old_value => { entry.insert(new_value.to_vec()); true diff --git a/primitives/core/src/offchain/testing.rs b/primitives/core/src/offchain/testing.rs index a2065eb17717f..0e2d57ba2f360 100644 --- a/primitives/core/src/offchain/testing.rs +++ b/primitives/core/src/offchain/testing.rs @@ -80,8 +80,9 @@ impl TestPersistentOffchainDB { let mut me = self.persistent.write(); for ((_prefix, key), value_operation) in changes { match value_operation { - OffchainOverlayedChange::SetValue(val) => - me.set(Self::PREFIX, key.as_slice(), val.as_slice()), + OffchainOverlayedChange::SetValue(val) => { + me.set(Self::PREFIX, key.as_slice(), val.as_slice()) + }, OffchainOverlayedChange::Remove => me.remove(Self::PREFIX, key.as_slice()), } } @@ -381,10 +382,12 @@ impl offchain::DbExternalities for TestOffchainExt { ) -> bool { let mut state = self.0.write(); match kind { - StorageKind::LOCAL => - state.local_storage.compare_and_set(b"", key, old_value, new_value), - StorageKind::PERSISTENT => - state.persistent_storage.compare_and_set(b"", 
key, old_value, new_value), + StorageKind::LOCAL => { + state.local_storage.compare_and_set(b"", key, old_value, new_value) + }, + StorageKind::PERSISTENT => { + state.persistent_storage.compare_and_set(b"", key, old_value, new_value) + }, } } diff --git a/primitives/core/src/sr25519.rs b/primitives/core/src/sr25519.rs index 9064fb7427393..93c33d1320fe9 100644 --- a/primitives/core/src/sr25519.rs +++ b/primitives/core/src/sr25519.rs @@ -145,7 +145,7 @@ impl TryFrom<&[u8]> for Public { fn try_from(data: &[u8]) -> Result { if data.len() != Self::LEN { - return Err(()) + return Err(()); } let mut r = [0u8; 32]; r.copy_from_slice(data); @@ -341,7 +341,7 @@ impl Signature { /// you are certain that the array actually is a signature. GIGO! pub fn from_slice(data: &[u8]) -> Option { if data.len() != 64 { - return None + return None; } let mut r = [0u8; 64]; r.copy_from_slice(data); diff --git a/primitives/database/src/kvdb.rs b/primitives/database/src/kvdb.rs index 5fe5fda307a1e..dd6798c2359e7 100644 --- a/primitives/database/src/kvdb.rs +++ b/primitives/database/src/kvdb.rs @@ -53,7 +53,7 @@ impl DbAdapter { return Err(error::DatabaseError(Box::new(std::io::Error::new( std::io::ErrorKind::Other, format!("Unexpected counter len {}", data.len()), - )))) + )))); } counter_data.copy_from_slice(&data); let counter = u32::from_le_bytes(counter_data); diff --git a/primitives/debug-derive/src/impls.rs b/primitives/debug-derive/src/impls.rs index 51a4d876c79b6..72dab228450f9 100644 --- a/primitives/debug-derive/src/impls.rs +++ b/primitives/debug-derive/src/impls.rs @@ -179,8 +179,9 @@ mod implementation { name_str, Fields::new(f.named.iter(), Some(syn::Token!(self)(Span::call_site()))), ), - syn::Fields::Unnamed(ref f) => - derive_fields(name_str, Fields::new(f.unnamed.iter(), None)), + syn::Fields::Unnamed(ref f) => { + derive_fields(name_str, Fields::new(f.unnamed.iter(), None)) + }, syn::Fields::Unit => derive_fields(name_str, Fields::Indexed { indices: vec![] }), } } diff 
--git a/primitives/finality-grandpa/src/lib.rs b/primitives/finality-grandpa/src/lib.rs index f92fee4f12963..f19c3a0125645 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -332,10 +332,10 @@ where macro_rules! check { ( $equivocation:expr, $message:expr ) => { // if both votes have the same target the equivocation is invalid. - if $equivocation.first.0.target_hash == $equivocation.second.0.target_hash && - $equivocation.first.0.target_number == $equivocation.second.0.target_number + if $equivocation.first.0.target_hash == $equivocation.second.0.target_hash + && $equivocation.first.0.target_number == $equivocation.second.0.target_number { - return false + return false; } // check signatures on both votes are valid @@ -514,7 +514,7 @@ impl<'a> Decode for VersionedAuthorityList<'a> { fn decode(value: &mut I) -> Result { let (version, authorities): (u8, AuthorityList) = Decode::decode(value)?; if version != AUTHORITIES_VERSION { - return Err("unknown Grandpa authorities version".into()) + return Err("unknown Grandpa authorities version".into()); } Ok(authorities.into()) } diff --git a/primitives/inherents/src/lib.rs b/primitives/inherents/src/lib.rs index a3ef963c47b39..6dfeb70200743 100644 --- a/primitives/inherents/src/lib.rs +++ b/primitives/inherents/src/lib.rs @@ -313,7 +313,7 @@ impl CheckInherentsResult { ) -> Result<(), Error> { // Don't accept any other error if self.fatal_error { - return Err(Error::FatalErrorReported) + return Err(Error::FatalErrorReported); } if error.is_fatal_error() { @@ -361,9 +361,9 @@ impl CheckInherentsResult { #[cfg(feature = "std")] impl PartialEq for CheckInherentsResult { fn eq(&self, other: &Self) -> bool { - self.fatal_error == other.fatal_error && - self.okay == other.okay && - self.errors.data == other.errors.data + self.fatal_error == other.fatal_error + && self.okay == other.okay + && self.errors.data == other.errors.data } } diff --git a/primitives/io/src/batch_verifier.rs 
b/primitives/io/src/batch_verifier.rs index 501d986758bf2..430235acbb2da 100644 --- a/primitives/io/src/batch_verifier.rs +++ b/primitives/io/src/batch_verifier.rs @@ -65,7 +65,7 @@ impl BatchVerifier { ) -> bool { // there is already invalid transaction encountered if self.invalid.load(AtomicOrdering::Relaxed) { - return false + return false; } let invalid_clone = self.invalid.clone(); @@ -118,7 +118,7 @@ impl BatchVerifier { message: Vec, ) -> bool { if self.invalid.load(AtomicOrdering::Relaxed) { - return false + return false; } self.sr25519_items.push(Sr25519BatchItem { signature, pub_key, message }); @@ -172,7 +172,7 @@ impl BatchVerifier { ); if !Self::verify_sr25519_batch(std::mem::take(&mut self.sr25519_items)) { - return false + return false; } if pending.len() > 0 { @@ -196,7 +196,7 @@ impl BatchVerifier { "Haven't received async result from verification task. Returning false.", ); - return false + return false; } } diff --git a/primitives/keystore/src/lib.rs b/primitives/keystore/src/lib.rs index 6540e71bc3fe0..8e6ba8a4aa9b1 100644 --- a/primitives/keystore/src/lib.rs +++ b/primitives/keystore/src/lib.rs @@ -140,11 +140,11 @@ pub trait CryptoStore: Send + Sync { msg: &[u8], ) -> Result)>, Error> { if keys.len() == 1 { - return Ok(self.sign_with(id, &keys[0], msg).await?.map(|s| (keys[0].clone(), s))) + return Ok(self.sign_with(id, &keys[0], msg).await?.map(|s| (keys[0].clone(), s))); } else { for k in self.supported_keys(id, keys).await? { if let Ok(Some(sign)) = self.sign_with(id, &k, msg).await { - return Ok(Some((k, sign))) + return Ok(Some((k, sign))); } } } @@ -317,11 +317,11 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { if keys.len() == 1 { return Ok( SyncCryptoStore::sign_with(self, id, &keys[0], msg)?.map(|s| (keys[0].clone(), s)) - ) + ); } else { for k in SyncCryptoStore::supported_keys(self, id, keys)? 
{ if let Ok(Some(sign)) = SyncCryptoStore::sign_with(self, id, &k, msg) { - return Ok(Some((k, sign))) + return Ok(Some((k, sign))); } } } diff --git a/primitives/keystore/src/testing.rs b/primitives/keystore/src/testing.rs index a9ec6709d912a..b74236acbb236 100644 --- a/primitives/keystore/src/testing.rs +++ b/primitives/keystore/src/testing.rs @@ -368,8 +368,11 @@ impl SyncCryptoStore for KeyStore { transcript_data: VRFTranscriptData, ) -> Result, Error> { let transcript = make_transcript(transcript_data); - let pair = - if let Some(k) = self.sr25519_key_pair(key_type, public) { k } else { return Ok(None) }; + let pair = if let Some(k) = self.sr25519_key_pair(key_type, public) { + k + } else { + return Ok(None); + }; let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); Ok(Some(VRFSignature { output: inout.to_output(), proof })) diff --git a/primitives/maybe-compressed-blob/src/lib.rs b/primitives/maybe-compressed-blob/src/lib.rs index 99c12ed39bc04..6fd29b966c950 100644 --- a/primitives/maybe-compressed-blob/src/lib.rs +++ b/primitives/maybe-compressed-blob/src/lib.rs @@ -85,7 +85,7 @@ pub fn decompress(blob: &[u8], bomb_limit: usize) -> Result, Error> { /// able to differentiate it from a compression bomb. 
pub fn compress(blob: &[u8], bomb_limit: usize) -> Option> { if blob.len() > bomb_limit { - return None + return None; } let mut buf = ZSTD_PREFIX.to_vec(); diff --git a/primitives/npos-elections/fuzzer/src/common.rs b/primitives/npos-elections/fuzzer/src/common.rs index ad9bd43f9bce0..8f72928f49b04 100644 --- a/primitives/npos-elections/fuzzer/src/common.rs +++ b/primitives/npos-elections/fuzzer/src/common.rs @@ -158,10 +158,12 @@ pub fn generate_random_npos_result( ( match election_type { - ElectionType::Phragmen(conf) => - seq_phragmen(to_elect, candidates.clone(), voters.clone(), conf).unwrap(), - ElectionType::Phragmms(conf) => - phragmms(to_elect, candidates.clone(), voters.clone(), conf).unwrap(), + ElectionType::Phragmen(conf) => { + seq_phragmen(to_elect, candidates.clone(), voters.clone(), conf).unwrap() + }, + ElectionType::Phragmms(conf) => { + phragmms(to_elect, candidates.clone(), voters.clone(), conf).unwrap() + }, }, candidates, voters, diff --git a/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs b/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs index e053f9aa0cddd..c4544557f09e4 100644 --- a/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs +++ b/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs @@ -60,7 +60,7 @@ fn main() { if score.minimal_stake == 0 { // such cases cannot be improved by balancing. - return + return; } score }; @@ -88,9 +88,9 @@ fn main() { // The only guarantee of balancing is such that the first and third element of the // score cannot decrease. 
assert!( - balanced_score.minimal_stake >= unbalanced_score.minimal_stake && - balanced_score.sum_stake == unbalanced_score.sum_stake && - balanced_score.sum_stake_squared <= unbalanced_score.sum_stake_squared + balanced_score.minimal_stake >= unbalanced_score.minimal_stake + && balanced_score.sum_stake == unbalanced_score.sum_stake + && balanced_score.sum_stake_squared <= unbalanced_score.sum_stake_squared ); } }); diff --git a/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs b/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs index 3f114674e29d9..6af4f6492ded3 100644 --- a/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs +++ b/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs @@ -60,7 +60,7 @@ fn main() { if score.minimal_stake == 0 { // such cases cannot be improved by balancing. - return + return; } score }; @@ -85,9 +85,9 @@ fn main() { // The only guarantee of balancing is such that the first and third element of the score // cannot decrease. assert!( - balanced_score.minimal_stake >= unbalanced_score.minimal_stake && - balanced_score.sum_stake == unbalanced_score.sum_stake && - balanced_score.sum_stake_squared <= unbalanced_score.sum_stake_squared + balanced_score.minimal_stake >= unbalanced_score.minimal_stake + && balanced_score.sum_stake == unbalanced_score.sum_stake + && balanced_score.sum_stake_squared <= unbalanced_score.sum_stake_squared ); }); } diff --git a/primitives/npos-elections/src/balancing.rs b/primitives/npos-elections/src/balancing.rs index 4a713658ad38f..a71e5b08bf536 100644 --- a/primitives/npos-elections/src/balancing.rs +++ b/primitives/npos-elections/src/balancing.rs @@ -60,7 +60,7 @@ pub fn balance( config: &BalancingConfig, ) -> usize { if config.iterations == 0 { - return 0 + return 0; } let mut iter = 0; @@ -75,7 +75,7 @@ pub fn balance( iter += 1; if max_diff <= config.tolerance || iter >= config.iterations { - break iter + break iter; } } } @@ -94,7 +94,7 @@ pub(crate) fn balance_voter( // 
Either empty, or a self vote. Not much to do in either case. if elected_edges.len() <= 1 { - return Zero::zero() + return Zero::zero(); } // amount of stake from this voter that is used in edges. @@ -125,7 +125,7 @@ pub(crate) fn balance_voter( let mut difference = max_stake.saturating_sub(*min_stake); difference = difference.saturating_add(voter.budget.saturating_sub(stake_used)); if difference < tolerance { - return difference + return difference; } difference } else { @@ -151,7 +151,7 @@ pub(crate) fn balance_voter( if temp.saturating_sub(cumulative_backed_stake) > voter.budget { // defensive only. length of elected_edges is checked to be above 1. last_index = index.saturating_sub(1) as usize; - break + break; } cumulative_backed_stake = cumulative_backed_stake.saturating_add(backed_stake); } diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index d0c9ed18caddc..15896775bc566 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -553,7 +553,7 @@ pub fn setup_inputs( for v in votes { if edges.iter().any(|e| e.who == v) { // duplicate edge. - continue + continue; } if let Some(idx) = c_idx_cache.get(&v) { // This candidate is valid + already cached. 
diff --git a/primitives/npos-elections/src/mock.rs b/primitives/npos-elections/src/mock.rs index 5a06e3f3c88ca..1a069bb5d5e98 100644 --- a/primitives/npos-elections/src/mock.rs +++ b/primitives/npos-elections/src/mock.rs @@ -145,7 +145,7 @@ where elected_candidates.push((winner.who, winner.approval_stake as ExtendedBalance)); } else { - break + break; } } @@ -190,7 +190,7 @@ pub(crate) fn equalize_float( } if max_diff < tolerance { - break + break; } } } @@ -207,7 +207,7 @@ where { let budget = budget_balance as f64; if elected_edges.is_empty() { - return 0.0 + return 0.0; } let stake_used = elected_edges.iter().fold(0.0, |s, e| s + e.1); @@ -235,7 +235,7 @@ where difference = max_stake - min_stake; difference = difference + budget - stake_used; if difference < tolerance { - return difference + return difference; } } else { difference = budget; @@ -266,7 +266,7 @@ where let stake_sub = stake_mul - cumulative_stake; if stake_sub > budget { last_index = idx.checked_sub(1).unwrap_or(0); - return + return; } cumulative_stake = cumulative_stake + stake; } diff --git a/primitives/npos-elections/src/node.rs b/primitives/npos-elections/src/node.rs index 6642a9ae39736..dbba156803450 100644 --- a/primitives/npos-elections/src/node.rs +++ b/primitives/npos-elections/src/node.rs @@ -93,7 +93,7 @@ impl Node { /// Returns true if `other` is the parent of `who`. 
pub fn is_parent_of(who: &NodeRef, other: &NodeRef) -> bool { if who.borrow().parent.is_none() { - return false + return false; } who.borrow().parent.as_ref() == Some(other) } @@ -127,7 +127,7 @@ impl Node { while let Some(ref next_parent) = current.clone().borrow().parent { if visited.contains(next_parent) { - break + break; } parent_path.push(next_parent.clone()); current = next_parent.clone(); diff --git a/primitives/npos-elections/src/phragmen.rs b/primitives/npos-elections/src/phragmen.rs index ca32780ed84b4..f69d436e20625 100644 --- a/primitives/npos-elections/src/phragmen.rs +++ b/primitives/npos-elections/src/phragmen.rs @@ -176,7 +176,7 @@ pub fn seq_phragmen_core( } } } else { - break + break; } } diff --git a/primitives/npos-elections/src/phragmms.rs b/primitives/npos-elections/src/phragmms.rs index 3fbbad75e2f8f..7becbb2b0543c 100644 --- a/primitives/npos-elections/src/phragmms.rs +++ b/primitives/npos-elections/src/phragmms.rs @@ -62,7 +62,7 @@ pub fn phragmms( balance(&mut voters, config); } } else { - break + break; } } diff --git a/primitives/npos-elections/src/pjr.rs b/primitives/npos-elections/src/pjr.rs index fd7c8ef539241..c982945926496 100644 --- a/primitives/npos-elections/src/pjr.rs +++ b/primitives/npos-elections/src/pjr.rs @@ -51,8 +51,8 @@ pub fn standard_threshold( ) -> Threshold { weights .into_iter() - .fold(Threshold::zero(), |acc, elem| acc.saturating_add(elem)) / - committee_size.max(1) as Threshold + .fold(Threshold::zero(), |acc, elem| acc.saturating_add(elem)) + / committee_size.max(1) as Threshold } /// Check a solution to be PJR. @@ -307,7 +307,7 @@ fn prepare_pjr_input( for t in ts { if edges.iter().any(|e| e.who == t) { // duplicate edge. 
- continue + continue; } if let Some(idx) = candidates_index.get(&t) { diff --git a/primitives/npos-elections/src/reduce.rs b/primitives/npos-elections/src/reduce.rs index c802a29504709..f8ec95ca12e1d 100644 --- a/primitives/npos-elections/src/reduce.rs +++ b/primitives/npos-elections/src/reduce.rs @@ -64,7 +64,7 @@ type Map = BTreeMap<(A, A), A>; fn combinations_2(input: &[T]) -> Vec<(T, T)> { let n = input.len(); if n < 2 { - return Default::default() + return Default::default(); } let mut comb = Vec::with_capacity(n * (n - 1) / 2); @@ -142,13 +142,13 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { .filter(|(t, _)| *t == v1 || *t == v2) .count() != 2 { - continue + continue; } // check if other_who voted for the same pair v1, v2. let maybe_other_assignments = assignments.iter().find(|a| a.who == *other_who); if maybe_other_assignments.is_none() { - continue + continue; } let other_assignment = maybe_other_assignments.expect("value is checked to be 'Some'"); @@ -177,7 +177,7 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { if other_votes_count < 2 { // This is not a cycle. Replace and continue. *other_who = who.clone(); - continue + continue; } else if other_votes_count == 2 { // This is a cycle. let mut who_cycle_votes: Vec<(A, ExtendedBalance)> = Vec::with_capacity(2); @@ -188,7 +188,7 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { }); if who_cycle_votes.len() != 2 { - continue + continue; } // Align the targets similarly. This helps with the circulation below. @@ -334,7 +334,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 let maybe_dist = assignments[assignment_index].distribution.get(dist_index); if maybe_dist.is_none() { // The rest of this loop is moot. 
- break + break; } let (target, _) = maybe_dist.expect("Value checked to be some").clone(); @@ -361,17 +361,17 @@ fn reduce_all(assignments: &mut Vec>) -> u32 (false, false) => { Node::set_parent_of(&target_node, &voter_node); dist_index += 1; - continue + continue; }, (false, true) => { Node::set_parent_of(&voter_node, &target_node); dist_index += 1; - continue + continue; }, (true, false) => { Node::set_parent_of(&target_node, &voter_node); dist_index += 1; - continue + continue; }, (true, true) => { /* don't continue and execute the rest */ }, }; @@ -492,7 +492,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 ) => (min_value, min_target, min_voter, min_index, min_direction), _ => { sp_runtime::print("UNREACHABLE code reached in `reduce` algorithm. This must be a bug."); - break + break; }, }; @@ -608,7 +608,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 let current = voter_root_path[i].clone().borrow().id.who.clone(); let next = voter_root_path[i + 1].clone().borrow().id.who.clone(); if min_edge.contains(¤t) && min_edge.contains(&next) { - break + break; } Node::set_parent_of(&voter_root_path[i + 1], &voter_root_path[i]); } @@ -619,7 +619,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 let current = target_root_path[i].clone().borrow().id.who.clone(); let next = target_root_path[i + 1].clone().borrow().id.who.clone(); if min_edge.contains(¤t) && min_edge.contains(&next) { - break + break; } Node::set_parent_of(&target_root_path[i + 1], &target_root_path[i]); } diff --git a/primitives/runtime-interface/proc-macro/src/lib.rs b/primitives/runtime-interface/proc-macro/src/lib.rs index afba38993fe76..0be5b6f77fe68 100644 --- a/primitives/runtime-interface/proc-macro/src/lib.rs +++ b/primitives/runtime-interface/proc-macro/src/lib.rs @@ -66,7 +66,7 @@ impl Parse for Options { } else if lookahead.peek(Token![,]) { let _ = input.parse::(); } else { - return Err(lookahead.error()) + return Err(lookahead.error()); } } Ok(res) diff --git 
a/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs b/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs index e25295fdca5cb..4748f3bc5619e 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs @@ -81,7 +81,7 @@ pub fn derive_impl(input: DeriveInput) -> Result { /// enum or a variant is not an unit. fn get_enum_field_idents(data: &Data) -> Result>> { match data { - Data::Enum(d) => + Data::Enum(d) => { if d.variants.len() <= 256 { Ok(d.variants.iter().map(|v| { if let Fields::Unit = v.fields { @@ -95,7 +95,8 @@ fn get_enum_field_idents(data: &Data) -> Result Err(Error::new(Span::call_site(), "`PassByEnum` only supports enums as input type.")), } } diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs b/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs index 7a527af129467..1632286c3938c 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs @@ -93,11 +93,11 @@ fn extract_inner_ty_and_name(data: &Data) -> Result<(Type, Option)> { match struct_data.fields { Fields::Named(ref named) if named.named.len() == 1 => { let field = &named.named[0]; - return Ok((field.ty.clone(), field.ident.clone())) + return Ok((field.ty.clone(), field.ident.clone())); }, Fields::Unnamed(ref unnamed) if unnamed.unnamed.len() == 1 => { let field = &unnamed.unnamed[0]; - return Ok((field.ty.clone(), field.ident.clone())) + return Ok((field.ty.clone(), field.ident.clone())); }, _ => {}, } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs index 03da0bed59815..2b746c0b54798 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs +++ 
b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs @@ -428,14 +428,15 @@ fn generate_wasm_interface_signature_for_host_function(sig: &Signature) -> Resul /// Generate the variable name that stores the FFI value. fn generate_ffi_value_var_name(pat: &Pat) -> Result { match pat { - Pat::Ident(pat_ident) => + Pat::Ident(pat_ident) => { if let Some(by_ref) = pat_ident.by_ref { Err(Error::new(by_ref.span(), "`ref` not supported!")) } else if let Some(sub_pattern) = &pat_ident.subpat { Err(Error::new(sub_pattern.0.span(), "Not supported!")) } else { Ok(Ident::new(&format!("{}_ffi_value", pat_ident.ident), Span::call_site())) - }, + } + }, _ => Err(Error::new(pat.span(), "Not supported as variable name!")), } } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs index 0ae0f5260286c..8e0ec7bd4b036 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs @@ -130,7 +130,7 @@ fn declare_essential_trait(trait_def: &ItemTrait) -> Result { let trait_ = &trait_def.ident; if let Some(param) = trait_def.generics.params.first() { - return Err(Error::new(param.span(), "Generic parameters not supported.")) + return Err(Error::new(param.span(), "Generic parameters not supported.")); } let interface = get_runtime_interface(trait_def)?; diff --git a/primitives/runtime-interface/proc-macro/src/utils.rs b/primitives/runtime-interface/proc-macro/src/utils.rs index 386eef153f45c..18937ac05d2ca 100644 --- a/primitives/runtime-interface/proc-macro/src/utils.rs +++ b/primitives/runtime-interface/proc-macro/src/utils.rs @@ -69,7 +69,7 @@ impl RuntimeInterfaceFunction { return Err(Error::new( item.sig.ident.span(), "Methods marked as #[trap_on_return] cannot return anything", - )) + )); } Ok(Self { item, 
should_trap_on_return }) @@ -127,13 +127,13 @@ impl RuntimeInterfaceFunctionSet { "Previous version with the same number defined here", )); - return Err(err) + return Err(err); } self.versions .insert(version.version, RuntimeInterfaceFunction::new(trait_item)?); - if self.latest_version_to_call.map_or(true, |v| v < version.version) && - version.is_callable() + if self.latest_version_to_call.map_or(true, |v| v < version.version) + && version.is_callable() { self.latest_version_to_call = Some(version.version); } @@ -315,7 +315,7 @@ impl Parse for VersionAttribute { Some(input.parse()?) } else { if !input.is_empty() { - return Err(Error::new(input.span(), "Unexpected token, expected `,`.")) + return Err(Error::new(input.span(), "Unexpected token, expected `,`.")); } None @@ -343,7 +343,7 @@ pub fn get_runtime_interface(trait_def: &ItemTrait) -> Result let version = get_item_version(item)?.unwrap_or_default(); if version.version < 1 { - return Err(Error::new(item.span(), "Version needs to be at least `1`.")) + return Err(Error::new(item.span(), "Version needs to be at least `1`.")); } match functions.entry(name.clone()) { @@ -366,7 +366,7 @@ pub fn get_runtime_interface(trait_def: &ItemTrait) -> Result "Unexpected version attribute: missing version '{}' for this function", next_expected ), - )) + )); } next_expected += 1; } diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index e801931c306cf..417dffb54d57b 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -196,7 +196,7 @@ impl FromFFIValue for Vec { let len = len as usize; if len == 0 { - return Vec::new() + return Vec::new(); } let data = unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) }; diff --git a/primitives/runtime-interface/tests/ui.rs b/primitives/runtime-interface/tests/ui.rs index f3d6aa59a0336..64de3311f5214 100644 --- a/primitives/runtime-interface/tests/ui.rs +++ 
b/primitives/runtime-interface/tests/ui.rs @@ -22,7 +22,7 @@ use std::env; fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if env::var("RUN_UI_TESTS").is_err() { - return + return; } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index c040b7cf517e0..b35cc41149d36 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -75,7 +75,7 @@ impl<'a> PiecewiseLinear<'a> { let n = n.min(d.clone()); if self.points.is_empty() { - return N::zero() + return N::zero(); } let next_point_index = self.points.iter().position(|p| n < p.0 * d.clone()); @@ -85,11 +85,11 @@ impl<'a> PiecewiseLinear<'a> { (self.points[previous_point_index], self.points[next_point_index]) } else { // There is no previous points, take first point ordinate - return self.points.first().map(|p| p.1).unwrap_or_else(Perbill::zero) * d + return self.points.first().map(|p| p.1).unwrap_or_else(Perbill::zero) * d; } } else { // There is no next points, take last point ordinate - return self.points.last().map(|p| p.1).unwrap_or_else(Perbill::zero) * d + return self.points.last().map(|p| p.1).unwrap_or_else(Perbill::zero) * d; }; let delta_y = multiply_by_rational_saturating( diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index ec74ebb0d4e15..d431af2d912a4 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -354,11 +354,13 @@ impl<'a> DigestItemRef<'a> { /// return the opaque data it contains. 
pub fn try_as_raw(&self, id: OpaqueDigestItemId) -> Option<&'a [u8]> { match (id, self) { - (OpaqueDigestItemId::Consensus(w), &Self::Consensus(v, s)) | - (OpaqueDigestItemId::Seal(w), &Self::Seal(v, s)) | - (OpaqueDigestItemId::PreRuntime(w), &Self::PreRuntime(v, s)) + (OpaqueDigestItemId::Consensus(w), &Self::Consensus(v, s)) + | (OpaqueDigestItemId::Seal(w), &Self::Seal(v, s)) + | (OpaqueDigestItemId::PreRuntime(w), &Self::PreRuntime(v, s)) if v == w => - Some(s), + { + Some(s) + }, (OpaqueDigestItemId::Other, &Self::Other(s)) => Some(s), _ => None, } @@ -464,14 +466,18 @@ mod tests { let check = |digest_item_type: DigestItemType| { let (variant_name, digest_item) = match digest_item_type { DigestItemType::Other => ("Other", DigestItem::Other(Default::default())), - DigestItemType::Consensus => - ("Consensus", DigestItem::Consensus(Default::default(), Default::default())), - DigestItemType::Seal => - ("Seal", DigestItem::Seal(Default::default(), Default::default())), - DigestItemType::PreRuntime => - ("PreRuntime", DigestItem::PreRuntime(Default::default(), Default::default())), - DigestItemType::RuntimeEnvironmentUpdated => - ("RuntimeEnvironmentUpdated", DigestItem::RuntimeEnvironmentUpdated), + DigestItemType::Consensus => { + ("Consensus", DigestItem::Consensus(Default::default(), Default::default())) + }, + DigestItemType::Seal => { + ("Seal", DigestItem::Seal(Default::default(), Default::default())) + }, + DigestItemType::PreRuntime => { + ("PreRuntime", DigestItem::PreRuntime(Default::default(), Default::default())) + }, + DigestItemType::RuntimeEnvironmentUpdated => { + ("RuntimeEnvironmentUpdated", DigestItem::RuntimeEnvironmentUpdated) + }, }; let encoded = digest_item.encode(); let variant = variants diff --git a/primitives/runtime/src/generic/era.rs b/primitives/runtime/src/generic/era.rs index b26545fb8404e..6730c97b6fac3 100644 --- a/primitives/runtime/src/generic/era.rs +++ b/primitives/runtime/src/generic/era.rs @@ -105,8 +105,8 @@ impl Encode 
for Era { Self::Immortal => output.push_byte(0), Self::Mortal(period, phase) => { let quantize_factor = (*period as u64 >> 12).max(1); - let encoded = (period.trailing_zeros() - 1).clamp(1, 15) as u16 | - ((phase / quantize_factor) << 4) as u16; + let encoded = (period.trailing_zeros() - 1).clamp(1, 15) as u16 + | ((phase / quantize_factor) << 4) as u16; encoded.encode_to(output); }, } diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index a7b43608f2b78..fbec1b2223e14 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -62,11 +62,11 @@ where Hash::Output: parity_util_mem::MallocSizeOf, { fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { - self.parent_hash.size_of(ops) + - self.number.size_of(ops) + - self.state_root.size_of(ops) + - self.extrinsics_root.size_of(ops) + - self.digest.size_of(ops) + self.parent_hash.size_of(ops) + + self.number.size_of(ops) + + self.state_root.size_of(ops) + + self.extrinsics_root.size_of(ops) + + self.digest.size_of(ops) } } diff --git a/primitives/runtime/src/generic/unchecked_extrinsic.rs b/primitives/runtime/src/generic/unchecked_extrinsic.rs index fb333abd6ac6e..98d4eed7b8f7c 100644 --- a/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -152,7 +152,7 @@ where let signed = lookup.lookup(signed)?; let raw_payload = SignedPayload::new(self.function, extra)?; if !raw_payload.using_encoded(|payload| signature.verify(payload, &signed)) { - return Err(InvalidTransaction::BadProof.into()) + return Err(InvalidTransaction::BadProof.into()); } let (function, extra, _) = raw_payload.deconstruct(); @@ -249,7 +249,7 @@ where let is_signed = version & 0b1000_0000 != 0; let version = version & 0b0111_1111; if version != EXTRINSIC_FORMAT_VERSION { - return Err("Invalid transaction version".into()) + return Err("Invalid transaction version".into()); } let 
signature = is_signed.then(|| Decode::decode(input)).transpose()?; @@ -261,7 +261,7 @@ where let length = before_length.saturating_sub(after_length); if length != expected_length.0 as usize { - return Err("Invalid length prefix".into()) + return Err("Invalid length prefix".into()); } } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 3752e31cbeeb0..6b748854d57c8 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -137,7 +137,7 @@ impl Justifications { /// not inserted. pub fn append(&mut self, justification: Justification) -> bool { if self.get(justification.0).is_some() { - return false + return false; } self.0.push(justification); true @@ -214,7 +214,7 @@ impl BuildStorage for sp_core::storage::Storage { if let Some(map) = storage.children_default.get_mut(&k) { map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); if !map.child_info.try_update(&other_map.child_info) { - return Err("Incompatible child info update".to_string()) + return Err("Incompatible child info update".to_string()); } } else { storage.children_default.insert(k, other_map.clone()); @@ -415,9 +415,10 @@ impl Verify for MultiSignature { (Self::Ecdsa(ref sig), who) => { let m = sp_io::hashing::blake2_256(msg.get()); match sp_io::crypto::secp256k1_ecdsa_recover_compressed(sig.as_ref(), &m) { - Ok(pubkey) => - &sp_io::hashing::blake2_256(pubkey.as_ref()) == - >::as_ref(who), + Ok(pubkey) => { + &sp_io::hashing::blake2_256(pubkey.as_ref()) + == >::as_ref(who) + }, _ => false, } }, @@ -436,8 +437,8 @@ impl Verify for AnySignature { let msg = msg.get(); sr25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) .map(|s| s.verify(msg, signer)) - .unwrap_or(false) || - ed25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) + .unwrap_or(false) + || ed25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) .map(|s| match ed25519::Public::from_slice(signer.as_ref()) { Err(()) => false, Ok(signer) => 
s.verify(msg, &signer), @@ -572,8 +573,9 @@ impl DispatchError { /// Return the same error but without the attached message. pub fn stripped(self) -> Self { match self { - DispatchError::Module(ModuleError { index, error, message: Some(_) }) => - DispatchError::Module(ModuleError { index, error, message: None }), + DispatchError::Module(ModuleError { index, error, message: Some(_) }) => { + DispatchError::Module(ModuleError { index, error, message: None }) + }, m => m, } } @@ -836,8 +838,8 @@ pub fn verify_encoded_lazy( macro_rules! assert_eq_error_rate { ($x:expr, $y:expr, $error:expr $(,)?) => { assert!( - ($x >= $crate::Saturating::saturating_sub($y, $error)) && - ($x <= $crate::Saturating::saturating_add($y, $error)), + ($x >= $crate::Saturating::saturating_sub($y, $error)) + && ($x <= $crate::Saturating::saturating_add($y, $error)), "{:?} != {:?} (with error rate {:?})", $x, $y, diff --git a/primitives/runtime/src/offchain/http.rs b/primitives/runtime/src/offchain/http.rs index dede4db5dd3de..06f94707c9a93 100644 --- a/primitives/runtime/src/offchain/http.rs +++ b/primitives/runtime/src/offchain/http.rs @@ -404,7 +404,7 @@ impl Iterator for ResponseBody { fn next(&mut self) -> Option { if self.error.is_some() { - return None + return None; } if self.filled_up_to.is_none() { @@ -413,7 +413,7 @@ impl Iterator for ResponseBody { match result { Err(e) => { self.error = Some(e); - return None + return None; }, Ok(0) => return None, Ok(size) => { @@ -425,7 +425,7 @@ impl Iterator for ResponseBody { if Some(self.position) == self.filled_up_to { self.filled_up_to = None; - return self.next() + return self.next(); } let result = self.buffer[self.position]; @@ -452,7 +452,7 @@ impl Headers { let raw = name.as_bytes(); for &(ref key, ref val) in &self.raw { if &**key == raw { - return str::from_utf8(val).ok() + return str::from_utf8(val).ok(); } } None diff --git a/primitives/runtime/src/offchain/storage_lock.rs b/primitives/runtime/src/offchain/storage_lock.rs index 
47325743bd2f3..c80367d10c3a5 100644 --- a/primitives/runtime/src/offchain/storage_lock.rs +++ b/primitives/runtime/src/offchain/storage_lock.rs @@ -222,8 +222,8 @@ impl Lockable for BlockAndTime { type Deadline = BlockAndTimeDeadline; fn deadline(&self) -> Self::Deadline { - let block_number = ::current_block_number() + - self.expiration_block_number_offset.into(); + let block_number = ::current_block_number() + + self.expiration_block_number_offset.into(); BlockAndTimeDeadline { timestamp: offchain::timestamp().add(self.expiration_duration), block_number, @@ -231,8 +231,8 @@ impl Lockable for BlockAndTime { } fn has_expired(deadline: &Self::Deadline) -> bool { - offchain::timestamp() > deadline.timestamp && - ::current_block_number() > deadline.block_number + offchain::timestamp() > deadline.timestamp + && ::current_block_number() > deadline.block_number } fn snooze(deadline: &Self::Deadline) { diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 1c48b1933431d..d766d21098988 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -1422,7 +1422,7 @@ impl<'a, T: codec::Input> codec::Input for AppendZerosInput<'a, T> { into[i] = b; i += 1; } else { - break + break; } } i @@ -1539,7 +1539,7 @@ impl AccountIdConversion fo fn try_from_sub_account(x: &T) -> Option<(Self, S)> { x.using_encoded(|d| { if d[0..4] != Id::TYPE_ID { - return None + return None; } let mut cursor = &d[4..]; let result = Decode::decode(&mut cursor).ok()?; diff --git a/primitives/runtime/src/transaction_validity.rs b/primitives/runtime/src/transaction_validity.rs index 4646808b8c8e3..60a3415f65478 100644 --- a/primitives/runtime/src/transaction_validity.rs +++ b/primitives/runtime/src/transaction_validity.rs @@ -105,12 +105,15 @@ impl From for &'static str { InvalidTransaction::BadProof => "Transaction has a bad signature", InvalidTransaction::AncientBirthBlock => "Transaction has an ancient birth block", 
InvalidTransaction::ExhaustsResources => "Transaction would exhaust the block limits", - InvalidTransaction::Payment => - "Inability to pay some fees (e.g. account balance too low)", - InvalidTransaction::BadMandatory => - "A call was labelled as mandatory, but resulted in an Error.", - InvalidTransaction::MandatoryDispatch => - "Transaction dispatch is mandatory; transactions may not have mandatory dispatches.", + InvalidTransaction::Payment => { + "Inability to pay some fees (e.g. account balance too low)" + }, + InvalidTransaction::BadMandatory => { + "A call was labelled as mandatory, but resulted in an Error." + }, + InvalidTransaction::MandatoryDispatch => { + "Transaction dispatch is mandatory; transactions may not have mandatory dispatches." + }, InvalidTransaction::Custom(_) => "InvalidTransaction custom error", InvalidTransaction::BadSigner => "Invalid signing address", } @@ -132,10 +135,12 @@ pub enum UnknownTransaction { impl From for &'static str { fn from(unknown: UnknownTransaction) -> &'static str { match unknown { - UnknownTransaction::CannotLookup => - "Could not lookup information required to validate the transaction", - UnknownTransaction::NoUnsignedValidator => - "Could not find an unsigned validator for the unsigned transaction", + UnknownTransaction::CannotLookup => { + "Could not lookup information required to validate the transaction" + }, + UnknownTransaction::NoUnsignedValidator => { + "Could not find an unsigned validator for the unsigned transaction" + }, UnknownTransaction::Custom(_) => "UnknownTransaction custom error", } } diff --git a/primitives/sandbox/src/embedded_executor.rs b/primitives/sandbox/src/embedded_executor.rs index 115c3192f3d89..7415d21f9c034 100644 --- a/primitives/sandbox/src/embedded_executor.rs +++ b/primitives/sandbox/src/embedded_executor.rs @@ -176,7 +176,7 @@ impl ImportResolver for EnvironmentDefinitionBuilder { module_name, field_name, ); - return Err(wasmi::Error::Instantiation(String::new())) + return 
Err(wasmi::Error::Instantiation(String::new())); }, }; Ok(FuncInstance::alloc_host(signature.clone(), host_func_idx.0)) @@ -212,7 +212,7 @@ impl ImportResolver for EnvironmentDefinitionBuilder { module_name, field_name, ); - return Err(wasmi::Error::Instantiation(String::new())) + return Err(wasmi::Error::Instantiation(String::new())); }, }; Ok(memory.memref.clone()) @@ -315,7 +315,7 @@ mod tests { fn env_assert(_e: &mut State, args: &[Value]) -> Result { if args.len() != 1 { - return Err(HostError) + return Err(HostError); } let condition = args[0].as_i32().ok_or_else(|| HostError)?; if condition != 0 { @@ -326,7 +326,7 @@ mod tests { } fn env_inc_counter(e: &mut State, args: &[Value]) -> Result { if args.len() != 1 { - return Err(HostError) + return Err(HostError); } let inc_by = args[0].as_i32().ok_or_else(|| HostError)?; e.counter += inc_by as u32; @@ -335,7 +335,7 @@ mod tests { /// Function that takes one argument of any type and returns that value. fn env_polymorphic_id(_e: &mut State, args: &[Value]) -> Result { if args.len() != 1 { - return Err(HostError) + return Err(HostError); } Ok(ReturnValue::Value(args[0])) } diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index fdc50e3f8f207..47127c8382cd5 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -121,13 +121,14 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { - self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() == - other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() && - self.overlay + self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() + == other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() + && self + .overlay .children() .map(|(iter, i)| (i, iter.map(|(k, v)| (k, v.value())).collect::>())) - .collect::>() == - other + .collect::>() + == other .overlay .children() .map(|(iter, i)| { @@ 
-189,7 +190,7 @@ impl Externalities for BasicExternalities { fn place_storage(&mut self, key: StorageKey, maybe_value: Option) { if is_child_storage_key(&key) { warn!(target: "trie", "Refuse to set child storage key via main storage"); - return + return; } self.overlay.set_storage(key, maybe_value) @@ -226,7 +227,7 @@ impl Externalities for BasicExternalities { "Refuse to clear prefix that is part of child storage key via main storage" ); let maybe_cursor = Some(prefix.to_vec()); - return MultiRemovalResults { maybe_cursor, backend: 0, unique: 0, loops: 0 } + return MultiRemovalResults { maybe_cursor, backend: 0, unique: 0, loops: 0 }; } let count = self.overlay.clear_prefix(prefix); diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 1db0ec517015b..f2eff9880583a 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -321,11 +321,11 @@ where // If `backend_key` is less than the `overlay_key`, we found out next key. if cmp == Some(Ordering::Less) { - return next_backend_key + return next_backend_key; } else if overlay_key.1.value().is_some() { // If there exists a value for the `overlay_key` in the overlay // (aka the key is still valid), it means we have found our next key. - return Some(overlay_key.0.to_vec()) + return Some(overlay_key.0.to_vec()); } else if cmp == Some(Ordering::Equal) { // If the `backend_key` and `overlay_key` are equal, it means that we need // to search for the next backend key, because the overlay has overwritten @@ -362,11 +362,11 @@ where // If `backend_key` is less than the `overlay_key`, we found out next key. if cmp == Some(Ordering::Less) { - return next_backend_key + return next_backend_key; } else if overlay_key.1.value().is_some() { // If there exists a value for the `overlay_key` in the overlay // (aka the key is still valid), it means we have found our next key. 
- return Some(overlay_key.0.to_vec()) + return Some(overlay_key.0.to_vec()); } else if cmp == Some(Ordering::Equal) { // If the `backend_key` and `overlay_key` are equal, it means that we need // to search for the next backend key, because the overlay has overwritten @@ -391,7 +391,7 @@ where let _guard = guard(); if is_child_storage_key(&key) { warn!(target: "trie", "Refuse to directly set child storage key"); - return + return; } // NOTE: be careful about touching the key names – used outside substrate! @@ -472,7 +472,7 @@ where target: "trie", "Refuse to directly clear prefix that is part or contains of child storage key", ); - return MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } + return MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 }; } self.mark_dirty(); @@ -538,7 +538,7 @@ where storage_root = %HexDisplay::from(&root.as_ref()), cached = true, ); - return root.encode() + return root.encode(); } let root = @@ -761,7 +761,7 @@ where .apply_to_keys_while(maybe_child, maybe_prefix, maybe_cursor, |key| { if maybe_limit.map_or(false, |limit| loop_count == limit) { maybe_next_key = Some(key.to_vec()); - return false + return false; } let overlay = match maybe_child { Some(child_info) => self.overlay.child_storage(child_info, key), diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 1f106593ede34..c04076937a98e 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -242,8 +242,9 @@ mod execution { /// Gets the corresponding manager for the execution strategy. 
pub fn get_manager(self) -> ExecutionManager> { match self { - ExecutionStrategy::AlwaysWasm => - ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted), + ExecutionStrategy::AlwaysWasm => { + ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted) + }, ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible, ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| { @@ -440,9 +441,10 @@ mod execution { self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); let (wasm_result, _) = self.execute_aux(false); - if (result.is_ok() && - wasm_result.is_ok() && result.as_ref().ok() == wasm_result.as_ref().ok()) || - result.is_err() && wasm_result.is_err() + if (result.is_ok() + && wasm_result.is_ok() + && result.as_ref().ok() == wasm_result.as_ref().ok()) + || result.is_err() && wasm_result.is_err() { result } else { @@ -486,15 +488,18 @@ mod execution { { let result = { match manager { - ExecutionManager::Both(on_consensus_failure) => - self.execute_call_with_both_strategy(on_consensus_failure), - ExecutionManager::NativeElseWasm => - self.execute_call_with_native_else_wasm_strategy(), + ExecutionManager::Both(on_consensus_failure) => { + self.execute_call_with_both_strategy(on_consensus_failure) + }, + ExecutionManager::NativeElseWasm => { + self.execute_call_with_native_else_wasm_strategy() + }, ExecutionManager::AlwaysWasm(trust_level) => { let _abort_guard = match trust_level { BackendTrustLevel::Trusted => None, - BackendTrustLevel::Untrusted => - Some(sp_panic_handler::AbortGuard::never_abort()), + BackendTrustLevel::Untrusted => { + Some(sp_panic_handler::AbortGuard::never_abort()) + }, }; self.execute_aux(false).0 }, @@ -705,7 +710,7 @@ mod execution { last: &mut SmallVec<[Vec; 2]>, ) -> bool { if stopped_at == 0 || stopped_at > MAX_NESTED_TRIE_DEPTH { - return false + return false; } match stopped_at { 1 => { @@ -715,7 +720,7 @@ mod 
execution { match last.len() { 0 => { last.push(top_last); - return true + return true; }, 2 => { last.pop(); @@ -724,12 +729,12 @@ mod execution { } // update top trie access. last[0] = top_last; - return true + return true; } else { // No change in top trie accesses. // Indicates end of reading of a child trie. last.truncate(1); - return true + return true; } }, 2 => { @@ -743,7 +748,7 @@ mod execution { if let Some(top_last) = top_last { last.push(top_last) } else { - return false + return false; } } else if let Some(top_last) = top_last { last[0] = top_last; @@ -752,10 +757,10 @@ mod execution { last.pop(); } last.push(child_last); - return true + return true; } else { // stopped at level 2 so child last is define. - return false + return false; } }, _ => (), @@ -799,7 +804,7 @@ mod execution { H::Out: Ord + Codec, { if start_at.len() > MAX_NESTED_TRIE_DEPTH { - return Err(Box::new("Invalid start of range.")) + return Err(Box::new("Invalid start of range.")); } let recorder = sp_trie::recorder::Recorder::default(); @@ -816,7 +821,7 @@ mod execution { { child_roots.insert(state_root); } else { - return Err(Box::new("Invalid range start child trie key.")) + return Err(Box::new("Invalid range start child trie key.")); } (Some(storage_key), start_at.get(1).cloned()) @@ -829,8 +834,9 @@ mod execution { let storage_key = PrefixedStorageKey::new_ref(storage_key); ( Some(match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - ChildInfo::new_default(storage_key), + Some((ChildType::ParentKeyId, storage_key)) => { + ChildInfo::new_default(storage_key) + }, None => return Err(Box::new("Invalid range start child trie key.")), }), 2, @@ -848,8 +854,8 @@ mod execution { None, start_at_ref, |key, value| { - if first && - start_at_ref + if first + && start_at_ref .as_ref() .map(|start| &key.as_slice() > start) .unwrap_or(true) @@ -859,8 +865,8 @@ mod execution { if first { true - } else if depth < MAX_NESTED_TRIE_DEPTH && - 
sp_core::storage::well_known_keys::is_child_storage_key( + } else if depth < MAX_NESTED_TRIE_DEPTH + && sp_core::storage::well_known_keys::is_child_storage_key( key.as_slice(), ) { count += 1; @@ -885,11 +891,11 @@ mod execution { if switch_child_key.is_none() { if depth == 1 { - break + break; } else if completed { start_at = child_key.take(); } else { - break + break; } } else { child_key = switch_child_key; @@ -1198,7 +1204,7 @@ mod execution { parent_storage_keys: Default::default(), }]; if start_at.len() > MAX_NESTED_TRIE_DEPTH { - return Err(Box::new("Invalid start of range.")) + return Err(Box::new("Invalid start of range.")); } let mut child_roots = HashSet::new(); @@ -1211,7 +1217,7 @@ mod execution { child_roots.insert(state_root.clone()); Some((storage_key, state_root)) } else { - return Err(Box::new("Invalid range start child trie key.")) + return Err(Box::new("Invalid range start child trie key.")); }; (child_key, start_at.get(1).cloned()) @@ -1230,8 +1236,9 @@ mod execution { let storage_key = PrefixedStorageKey::new_ref(storage_key); ( Some(match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - ChildInfo::new_default(storage_key), + Some((ChildType::ParentKeyId, storage_key)) => { + ChildInfo::new_default(storage_key) + }, None => return Err(Box::new("Invalid range start child trie key.")), }), 2, @@ -1254,8 +1261,8 @@ mod execution { None, start_at_ref, |key, value| { - if first && - start_at_ref + if first + && start_at_ref .as_ref() .map(|start| &key.as_slice() > start) .unwrap_or(true) @@ -1268,8 +1275,8 @@ mod execution { } if first { true - } else if depth < MAX_NESTED_TRIE_DEPTH && - sp_core::storage::well_known_keys::is_child_storage_key( + } else if depth < MAX_NESTED_TRIE_DEPTH + && sp_core::storage::well_known_keys::is_child_storage_key( key.as_slice(), ) { if child_roots.contains(value.as_slice()) { @@ -1290,10 +1297,10 @@ mod execution { if switch_child_key.is_none() { if !completed { - break 
depth + break depth; } if depth == 1 { - break 0 + break 0; } else { start_at = child_key.take().map(|entry| entry.0); } @@ -1903,7 +1910,7 @@ mod tests { key.clone(), Some(value.clone()), )); - break + break; } } } @@ -2064,7 +2071,7 @@ mod tests { .unwrap(); if completed_depth == 0 { - break + break; } assert!(result.update_last_key(completed_depth, &mut start_at)); } diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs b/primitives/state-machine/src/overlayed_changes/changeset.rs index e5dad7157c731..835e8ab96c24b 100644 --- a/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -289,7 +289,7 @@ impl OverlayedMap { /// Calling this while already inside the runtime will return an error. pub fn enter_runtime(&mut self) -> Result<(), AlreadyInRuntime> { if let ExecutionMode::Runtime = self.execution_mode { - return Err(AlreadyInRuntime) + return Err(AlreadyInRuntime); } self.execution_mode = ExecutionMode::Runtime; self.num_client_transactions = self.transaction_depth(); @@ -302,7 +302,7 @@ impl OverlayedMap { /// Calling this while already outside the runtime will return an error. 
pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { if let ExecutionMode::Client = self.execution_mode { - return Err(NotInRuntime) + return Err(NotInRuntime); } self.execution_mode = ExecutionMode::Client; if self.has_open_runtime_transactions() { @@ -349,7 +349,7 @@ impl OverlayedMap { // runtime is not allowed to close transactions started by the client if let ExecutionMode::Runtime = self.execution_mode { if !self.has_open_runtime_transactions() { - return Err(NoOpenTransaction) + return Err(NoOpenTransaction); } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index da4250b6ba3e1..f91c50e32be13 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -619,8 +619,8 @@ pub mod tests { .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_version); assert!(!tx.drain().is_empty()); assert!( - new_root != - test_trie(state_version, None, None) + new_root + != test_trie(state_version, None, None) .storage_root(iter::empty(), state_version) .0 ); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index cdd1bb0bba055..f9d8f641913cc 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -273,7 +273,7 @@ where #[cfg(feature = "std")] { if let Some(result) = self.cache.read().child_root.get(child_info.storage_key()) { - return Ok(*result) + return Ok(*result); } } @@ -469,7 +469,7 @@ where Ok(None) => return, Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); - return + return; }, } } else { @@ -492,7 +492,7 @@ where Ok(None) => return, Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); - return + return; }, }; @@ -538,8 +538,9 @@ where .build(); let prefix = maybe_prefix.unwrap_or(&[]); let iter = match maybe_start_at { - 
Some(start_at) => - TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at), + Some(start_at) => { + TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at) + }, None => TrieDBKeyIterator::new_prefixed(&trie, prefix), }?; @@ -552,7 +553,7 @@ where .unwrap_or(true)); if !f(&key) { - break + break; } } @@ -599,7 +600,7 @@ where debug_assert!(key.starts_with(prefix)); if !f(key, value) { - return Ok(false) + return Ok(false); } } @@ -615,8 +616,9 @@ where }; match result { Ok(completed) => Ok(completed), - Err(e) if matches!(*e, TrieError::IncompleteDatabase(_)) && allow_missing_nodes => - Ok(false), + Err(e) if matches!(*e, TrieError::IncompleteDatabase(_)) && allow_missing_nodes => { + Ok(false) + }, Err(e) => Err(format!("TrieDB iteration error: {}", e)), } } @@ -727,7 +729,7 @@ where self.with_recorder_and_cache_for_storage_root(Some(child_root), |recorder, cache| { let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); match match state_version { - StateVersion::V0 => + StateVersion::V0 => { child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), &mut eph, @@ -735,8 +737,9 @@ where delta, recorder, cache, - ), - StateVersion::V1 => + ) + }, + StateVersion::V1 => { child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), &mut eph, @@ -744,7 +747,8 @@ where delta, recorder, cache, - ), + ) + }, } { Ok(ret) => (Some(ret), ret), Err(e) => { @@ -876,7 +880,7 @@ impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { - return Some([0u8].to_vec()) + return Some([0u8].to_vec()); } match self.storage.get(key, prefix) { Ok(x) => x, diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 79c1012196bde..1644cacbd0f8a 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -290,8 +290,9 @@ impl ChildInfo { /// this trie. 
pub fn prefixed_storage_key(&self) -> PrefixedStorageKey { match self { - ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => - ChildType::ParentKeyId.new_prefixed_key(data.as_slice()), + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => { + ChildType::ParentKeyId.new_prefixed_key(data.as_slice()) + }, } } diff --git a/primitives/timestamp/src/lib.rs b/primitives/timestamp/src/lib.rs index d88b1839babe6..cfb480b574b43 100644 --- a/primitives/timestamp/src/lib.rs +++ b/primitives/timestamp/src/lib.rs @@ -241,7 +241,7 @@ impl sp_inherents::InherentDataProvider for InherentDataProvider { error: &[u8], ) -> Option> { if *identifier != INHERENT_IDENTIFIER { - return None + return None; } match InherentError::try_from(&INHERENT_IDENTIFIER, error)? { @@ -253,7 +253,7 @@ impl sp_inherents::InherentDataProvider for InherentDataProvider { if valid > timestamp + max_drift { return Some(Err(sp_inherents::Error::Application(Box::from( InherentError::TooFarInFuture, - )))) + )))); } let diff = valid.checked_sub(timestamp).unwrap_or_default(); diff --git a/primitives/transaction-storage-proof/src/lib.rs b/primitives/transaction-storage-proof/src/lib.rs index fde84c1c58b1a..e4a9ce74763f3 100644 --- a/primitives/transaction-storage-proof/src/lib.rs +++ b/primitives/transaction-storage-proof/src/lib.rs @@ -101,7 +101,7 @@ impl sp_inherents::InherentDataProvider for InherentDataProvider { mut error: &[u8], ) -> Option> { if *identifier != INHERENT_IDENTIFIER { - return None + return None; } let error = InherentError::decode(&mut error).ok()?; @@ -160,12 +160,13 @@ pub mod registration { .saturating_sub(DEFAULT_STORAGE_PERIOD.into()); if number.is_zero() { // Too early to collect proofs. - return Ok(InherentDataProvider::new(None)) + return Ok(InherentDataProvider::new(None)); } let proof = match client.block_indexed_body(number)? 
{ - Some(transactions) if !transactions.is_empty() => - Some(build_proof(parent.as_ref(), transactions)?), + Some(transactions) if !transactions.is_empty() => { + Some(build_proof(parent.as_ref(), transactions)?) + }, Some(_) | None => { // Nothing was indexed in that block. None diff --git a/primitives/trie/src/cache/mod.rs b/primitives/trie/src/cache/mod.rs index 85539cf626857..3b6420b7f9c62 100644 --- a/primitives/trie/src/cache/mod.rs +++ b/primitives/trie/src/cache/mod.rs @@ -290,7 +290,7 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { if let Some(res) = self.shared_inner.node_cache().get(&hash) { tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache"); self.shared_node_cache_access.insert(hash); - return Ok(res) + return Ok(res); } match self.local_cache.entry(hash) { @@ -317,7 +317,7 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { if let Some(node) = self.shared_inner.node_cache().get(hash) { tracing::trace!(target: LOG_TARGET, ?hash, "Getting node from shared cache"); self.shared_node_cache_access.insert(*hash); - return Some(node) + return Some(node); } let res = self.local_cache.get(hash); diff --git a/primitives/trie/src/cache/shared_cache.rs b/primitives/trie/src/cache/shared_cache.rs index 9d4d36b83a28a..e4284aa5bc74f 100644 --- a/primitives/trie/src/cache/shared_cache.rs +++ b/primitives/trie/src/cache/shared_cache.rs @@ -287,10 +287,11 @@ impl PartialEq for ValueCacheKey<'_, H> { (Self::Hash { hash, .. }, _) => *hash == other.get_hash(), (_, Self::Hash { hash: other_hash, .. }) => self.get_hash() == *other_hash, // If both are not the `Hash` variant, we compare all the values. 
- _ => - self.get_hash() == other.get_hash() && - self.storage_root() == other.storage_root() && - self.storage_key() == other.storage_key(), + _ => { + self.get_hash() == other.get_hash() + && self.storage_root() == other.storage_root() + && self.storage_key() == other.storage_key() + }, } } } @@ -405,12 +406,14 @@ impl> SharedValueCache { "`SharedValueCached::update` was called with a key to add \ that uses the `Hash` variant. This would lead to potential hash collision!", ); - return + return; + }, + ValueCacheKey::Ref { storage_key, storage_root, hash } => { + (storage_root, storage_key.into(), hash) + }, + ValueCacheKey::Value { storage_root, storage_key, hash } => { + (storage_root, storage_key, hash) }, - ValueCacheKey::Ref { storage_key, storage_root, hash } => - (storage_root, storage_key.into(), hash), - ValueCacheKey::Value { storage_root, storage_key, hash } => - (storage_root, storage_key, hash), }; let (size_update, storage_key) = diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index f632320dd296d..76cc28668d61a 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -43,7 +43,7 @@ impl<'a> ByteSliceInput<'a> { fn take(&mut self, count: usize) -> Result, codec::Error> { if self.offset + count > self.data.len() { - return Err("out of data".into()) + return Err("out of data".into()); } let range = self.offset..(self.offset + count); @@ -65,7 +65,7 @@ impl<'a> Input for ByteSliceInput<'a> { fn read_byte(&mut self) -> Result { if self.offset + 1 > self.data.len() { - return Err("out of data".into()) + return Err("out of data".into()); } let byte = self.data[self.offset]; @@ -111,11 +111,11 @@ where let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { - return Err(Error::BadFormat) + return Err(Error::BadFormat); } let partial = input.take( - (nibble_count + 
(nibble_ops::NIBBLE_PER_BYTE - 1)) / - nibble_ops::NIBBLE_PER_BYTE, + (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) + / nibble_ops::NIBBLE_PER_BYTE, )?; let partial_padding = nibble_ops::number_padding(nibble_count); let bitmap_range = input.take(BITMAP_LENGTH)?; @@ -155,11 +155,11 @@ where let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { - return Err(Error::BadFormat) + return Err(Error::BadFormat); } let partial = input.take( - (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / - nibble_ops::NIBBLE_PER_BYTE, + (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) + / nibble_ops::NIBBLE_PER_BYTE, )?; let partial_padding = nibble_ops::number_padding(nibble_count); let value = if contains_hash { @@ -228,12 +228,15 @@ where ) -> Vec { let contains_hash = matches!(&value, Some(Value::Node(..))); let mut output = match (&value, contains_hash) { - (&None, _) => - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue), - (_, false) => - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue), - (_, true) => - partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueBranch), + (&None, _) => { + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) + }, + (_, false) => { + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue) + }, + (_, true) => { + partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueBranch) + }, }; let bitmap_index = output.len(); @@ -284,10 +287,12 @@ fn partial_from_iterator_encode>( NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), - NodeKind::HashedValueLeaf => - 
NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output), - NodeKind::HashedValueBranch => - NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output), + NodeKind::HashedValueLeaf => { + NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output) + }, + NodeKind::HashedValueBranch => { + NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output) + }, }; output.extend(partial); output diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index f3544be65b2e9..3d74eec4d1cc5 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -54,16 +54,18 @@ impl Encode for NodeHeader { fn encode_to(&self, output: &mut T) { match self { NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE), - NodeHeader::Branch(true, nibble_count) => - encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, 2, output), + NodeHeader::Branch(true, nibble_count) => { + encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, 2, output) + }, NodeHeader::Branch(false, nibble_count) => encode_size_and_prefix( *nibble_count, trie_constants::BRANCH_WITHOUT_MASK, 2, output, ), - NodeHeader::Leaf(nibble_count) => - encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, 2, output), + NodeHeader::Leaf(nibble_count) => { + encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, 2, output) + }, NodeHeader::HashedValueBranch(nibble_count) => encode_size_and_prefix( *nibble_count, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, @@ -86,14 +88,16 @@ impl Decode for NodeHeader { fn decode(input: &mut I) -> Result { let i = input.read_byte()?; if i == trie_constants::EMPTY_TRIE { - return Ok(NodeHeader::Null) + return Ok(NodeHeader::Null); } match i & (0b11 << 6) { trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input, 2)?)), - trie_constants::BRANCH_WITH_MASK => - Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)), - 
trie_constants::BRANCH_WITHOUT_MASK => - Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)), + trie_constants::BRANCH_WITH_MASK => { + Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)) + }, + trie_constants::BRANCH_WITHOUT_MASK => { + Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)) + }, trie_constants::EMPTY_TRIE => { if i & (0b111 << 5) == trie_constants::ALT_HASHING_LEAF_PREFIX_MASK { Ok(NodeHeader::HashedValueLeaf(decode_size(i, input, 3)?)) @@ -160,13 +164,13 @@ fn decode_size( let max_value = 255u8 >> prefix_mask; let mut result = (first & max_value) as usize; if result < max_value as usize { - return Ok(result) + return Ok(result); } result -= 1; loop { let n = input.read_byte()? as usize; if n < 255 { - return Ok(result + n + 1) + return Ok(result + n + 1); } result += 255; } diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index d5ae9a43fb1eb..a13531e5956c3 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -71,7 +71,7 @@ where // Only check root if expected root is passed as argument. if let Some(expected_root) = expected_root { if expected_root != &top_root { - return Err(Error::RootMismatch(top_root, *expected_root)) + return Err(Error::RootMismatch(top_root, *expected_root)); } } @@ -92,7 +92,7 @@ where let mut root = TrieHash::::default(); // still in a proof so prevent panic if root.as_mut().len() != value.as_slice().len() { - return Err(Error::InvalidChildRoot(key, value)) + return Err(Error::InvalidChildRoot(key, value)); } root.as_mut().copy_from_slice(value.as_ref()); child_tries.push(root); @@ -110,7 +110,7 @@ where } if !HashDBT::::contains(db, &top_root, EMPTY_PREFIX) { - return Err(Error::IncompleteProof) + return Err(Error::IncompleteProof); } let mut previous_extracted_child_trie = None; @@ -132,11 +132,11 @@ where if let Some(child_root) = previous_extracted_child_trie { // A child root was read from proof but is not present // in top trie. 
- return Err(Error::ExtraneousChildProof(child_root)) + return Err(Error::ExtraneousChildProof(child_root)); } if nodes_iter.next().is_some() { - return Err(Error::ExtraneousChildNode) + return Err(Error::ExtraneousChildNode); } Ok(top_root) @@ -171,7 +171,7 @@ where let mut root = TrieHash::::default(); if root.as_mut().len() != value.as_slice().len() { // some child trie root in top trie are not an encoded hash. - return Err(Error::InvalidChildRoot(key.to_vec(), value.to_vec())) + return Err(Error::InvalidChildRoot(key.to_vec(), value.to_vec())); } root.as_mut().copy_from_slice(value.as_ref()); child_tries.push(root); @@ -194,7 +194,7 @@ where if !HashDBT::::contains(partial_db, &child_root, EMPTY_PREFIX) { // child proof are allowed to be missing (unused root can be included // due to trie structure modification). - continue + continue; } let trie = crate::TrieDBBuilder::::new(partial_db, &child_root).build(); diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 435e6a986722e..6b75b9b05ec54 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -57,14 +57,18 @@ fn fuse_nibbles_node(nibbles: &[u8], kind: NodeKind) -> impl Iterator let size = nibbles.len(); let iter_start = match kind { NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2), - NodeKind::BranchNoValue => - size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2), - NodeKind::BranchWithValue => - size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2), - NodeKind::HashedValueLeaf => - size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3), - NodeKind::HashedValueBranch => - size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4), + NodeKind::BranchNoValue => { + size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2) + }, + NodeKind::BranchWithValue => { + size_and_prefix_iterator(size, 
trie_constants::BRANCH_WITH_MASK, 2) + }, + NodeKind::HashedValueLeaf => { + size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3) + }, + NodeKind::HashedValueBranch => { + size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4) + }, }; iter_start .chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None }) diff --git a/primitives/version/proc-macro/src/decl_runtime_version.rs b/primitives/version/proc-macro/src/decl_runtime_version.rs index 9a25adfa5fca2..e7d54545832ba 100644 --- a/primitives/version/proc-macro/src/decl_runtime_version.rs +++ b/primitives/version/proc-macro/src/decl_runtime_version.rs @@ -81,8 +81,12 @@ impl ParseRuntimeVersion { fn parse_expr(init_expr: &Expr) -> Result { let init_expr = match init_expr { Expr::Struct(ref e) => e, - _ => - return Err(Error::new(init_expr.span(), "expected a struct initializer expression")), + _ => { + return Err(Error::new( + init_expr.span(), + "expected a struct initializer expression", + )) + }, }; let mut parsed = ParseRuntimeVersion::default(); @@ -95,8 +99,9 @@ impl ParseRuntimeVersion { fn parse_field_value(&mut self, field_value: &FieldValue) -> Result<()> { let field_name = match field_value.member { syn::Member::Named(ref ident) => ident, - syn::Member::Unnamed(_) => - return Err(Error::new(field_value.span(), "only named members must be used")), + syn::Member::Unnamed(_) => { + return Err(Error::new(field_value.span(), "only named members must be used")) + }, }; fn parse_once( @@ -133,7 +138,7 @@ impl ParseRuntimeVersion { // the "runtime_version" custom section. `impl_runtime_apis` is responsible for // generating a custom section with the supported runtime apis descriptor. 
} else { - return Err(Error::new(field_name.span(), "unknown field")) + return Err(Error::new(field_name.span(), "unknown field")); } Ok(()) @@ -142,11 +147,12 @@ impl ParseRuntimeVersion { fn parse_num_literal(expr: &Expr) -> Result { let lit = match *expr { Expr::Lit(ExprLit { lit: Lit::Int(ref lit), .. }) => lit, - _ => + _ => { return Err(Error::new( expr.span(), "only numeric literals (e.g. `10`) are supported here", - )), + )) + }, }; lit.base10_parse::() } @@ -154,11 +160,12 @@ impl ParseRuntimeVersion { fn parse_num_literal_u8(expr: &Expr) -> Result { let lit = match *expr { Expr::Lit(ExprLit { lit: Lit::Int(ref lit), .. }) => lit, - _ => + _ => { return Err(Error::new( expr.span(), "only numeric literals (e.g. `10`) are supported here", - )), + )) + }, }; lit.base10_parse::() } diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 0bd62f0bac5aa..0c9c410d1c198 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -288,9 +288,9 @@ pub fn core_version_from_apis(apis: &ApisVec) -> Option { impl RuntimeVersion { /// Check if this version matches other version for calling into runtime. 
pub fn can_call_with(&self, other: &RuntimeVersion) -> bool { - self.spec_version == other.spec_version && - self.spec_name == other.spec_name && - self.authoring_version == other.authoring_version + self.spec_version == other.spec_version + && self.spec_name == other.spec_name + && self.authoring_version == other.authoring_version } /// Check if the given api with `api_id` is implemented and the version passes the given @@ -344,8 +344,8 @@ impl NativeVersion { "`spec_name` does not match `{}` vs `{}`", self.runtime_version.spec_name, other.spec_name, )) - } else if self.runtime_version.authoring_version != other.authoring_version && - !self.can_author_with.contains(&other.authoring_version) + } else if self.runtime_version.authoring_version != other.authoring_version + && !self.can_author_with.contains(&other.authoring_version) { Err(format!( "`authoring_version` does not match `{version}` vs `{other_version}` and \ diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index d3e71f0ad28d6..c2913668042ab 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -365,7 +365,7 @@ pub(crate) fn parse_rpc_result( if let Some(error) = error { return Err(serde_json::from_value(error.clone()) - .expect("the JSONRPC result's error is always valid; qed")) + .expect("the JSONRPC result's error is always valid; qed")); } Ok(RpcTransactionOutput { result, receiver }) @@ -399,7 +399,7 @@ where if notification.is_new_best { blocks.insert(notification.hash); if blocks.len() == count { - break + break; } } } diff --git a/test-utils/derive/src/lib.rs b/test-utils/derive/src/lib.rs index 06b7d2463cbd8..6ad9e7bb163f3 100644 --- a/test-utils/derive/src/lib.rs +++ b/test-utils/derive/src/lib.rs @@ -37,7 +37,7 @@ fn parse_knobs( let vis = input.vis; if !sig.inputs.is_empty() { - return Err(syn::Error::new_spanned(&sig, "No arguments expected for tests.")) + return Err(syn::Error::new_spanned(&sig, "No arguments expected for tests.")); } let 
crate_name = match crate_name("substrate-test-utils") { diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 8bda4ea602428..58d28d6c938af 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -197,7 +197,7 @@ impl BlindCheckable for Extrinsic { fn check(self) -> Result { match self { Extrinsic::AuthoritiesChange(new_auth) => Ok(Extrinsic::AuthoritiesChange(new_auth)), - Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first } => + Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first } => { if sp_runtime::verify_encoded_lazy(&signature, &transfer, &transfer.from) { Ok(Extrinsic::Transfer { transfer, @@ -206,7 +206,8 @@ impl BlindCheckable for Extrinsic { }) } else { Err(InvalidTransaction::BadProof.into()) - }, + } + }, Extrinsic::IncludeData(v) => Ok(Extrinsic::IncludeData(v)), Extrinsic::StorageChange(key, value) => Ok(Extrinsic::StorageChange(key, value)), Extrinsic::OffchainIndexSet(key, value) => Ok(Extrinsic::OffchainIndexSet(key, value)), @@ -535,13 +536,13 @@ impl frame_support::traits::PalletInfo for Runtime { fn index() -> Option { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::>() { - return Some(0) + return Some(0); } if type_id == sp_std::any::TypeId::of::>() { - return Some(1) + return Some(1); } if type_id == sp_std::any::TypeId::of::>() { - return Some(2) + return Some(2); } None @@ -549,13 +550,13 @@ impl frame_support::traits::PalletInfo for Runtime { fn name() -> Option<&'static str> { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::>() { - return Some("System") + return Some("System"); } if type_id == sp_std::any::TypeId::of::>() { - return Some("Timestamp") + return Some("Timestamp"); } if type_id == sp_std::any::TypeId::of::>() { - return Some("Babe") + return Some("Babe"); } None @@ -563,13 +564,13 @@ impl frame_support::traits::PalletInfo for Runtime { fn module_name() -> Option<&'static str> { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::>() { - return Some("system") + return Some("system"); } if type_id == sp_std::any::TypeId::of::>() { - return Some("pallet_timestamp") + return Some("pallet_timestamp"); } if type_id == sp_std::any::TypeId::of::>() { - return Some("pallet_babe") + return Some("pallet_babe"); } None @@ -578,13 +579,13 @@ impl frame_support::traits::PalletInfo for Runtime { use frame_support::traits::PalletInfoAccess as _; let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::>() { - return Some(system::Pallet::::crate_version()) + return Some(system::Pallet::::crate_version()); } if type_id == sp_std::any::TypeId::of::>() { - return Some(pallet_timestamp::Pallet::::crate_version()) + return Some(pallet_timestamp::Pallet::::crate_version()); } if type_id == sp_std::any::TypeId::of::>() { - return Some(pallet_babe::Pallet::::crate_version()) + return Some(pallet_babe::Pallet::::crate_version()); } None @@ -698,7 +699,7 @@ fn code_using_trie() -> u64 { let mut t = TrieDBMutBuilderV1::::new(&mut mdb, &mut root).build(); for (key, value) in &pairs { if t.insert(key, value).is_err() { - return 101 + return 101; } } } diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 6e33d5c25fe6f..6e7ef2adbcf13 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -188,17 +188,17 @@ impl frame_support::traits::ExecuteBlock for BlockExecutor { /// This doesn't attempt to validate anything regarding the block. pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { if check_signature(&utx).is_err() { - return InvalidTransaction::BadProof.into() + return InvalidTransaction::BadProof.into(); } let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce < expected_nonce { - return InvalidTransaction::Stale.into() + return InvalidTransaction::Stale.into(); } if tx.nonce > expected_nonce + 64 { - return InvalidTransaction::Future.into() + return InvalidTransaction::Future.into(); } let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); @@ -261,12 +261,15 @@ fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyEx match utx { Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. 
} if extrinsic_index != 0 => - Err(InvalidTransaction::ExhaustsResources.into()), + { + Err(InvalidTransaction::ExhaustsResources.into()) + }, Extrinsic::Transfer { ref transfer, .. } => execute_transfer_backend(transfer), Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), - Extrinsic::StorageChange(key, value) => - execute_storage_change(key, value.as_ref().map(|v| &**v)), + Extrinsic::StorageChange(key, value) => { + execute_storage_change(key, value.as_ref().map(|v| &**v)) + }, Extrinsic::OffchainIndexSet(key, value) => { sp_io::offchain_index::set(key, value); Ok(Ok(())) @@ -284,7 +287,7 @@ fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce != expected_nonce { - return Err(InvalidTransaction::Stale.into()) + return Err(InvalidTransaction::Stale.into()); } // increment nonce in storage @@ -296,7 +299,7 @@ fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { // enact transfer if tx.amount > from_balance { - return Err(InvalidTransaction::Payment.into()) + return Err(InvalidTransaction::Payment.into()); } let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index f8d551a6fa5bd..4d0e7aea1884a 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -254,13 +254,14 @@ impl sc_transaction_pool::ChainApi for TestApi { if !found_best { return ready(Ok(Err(TransactionValidityError::Invalid( InvalidTransaction::Custom(1), - )))) + )))); } }, - Ok(None) => + Ok(None) => { return ready(Ok(Err(TransactionValidityError::Invalid( InvalidTransaction::Custom(2), - )))), + )))) + }, 
Err(e) => return ready(Err(e)), } @@ -276,7 +277,9 @@ impl sc_transaction_pool::ChainApi for TestApi { }; if self.chain.read().invalid_hashes.contains(&self.hash_and_length(&uxt).0) { - return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0))))) + return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom( + 0, + ))))); } let mut validity = @@ -292,8 +295,9 @@ impl sc_transaction_pool::ChainApi for TestApi { at: &BlockId, ) -> Result>, Error> { Ok(match at { - generic::BlockId::Hash(x) => - self.chain.read().block_by_hash.get(x).map(|b| *b.header.number()), + generic::BlockId::Hash(x) => { + self.chain.read().block_by_hash.get(x).map(|b| *b.header.number()) + }, generic::BlockId::Number(num) => Some(*num), }) } @@ -304,10 +308,11 @@ impl sc_transaction_pool::ChainApi for TestApi { ) -> Result::Hash>, Error> { Ok(match at { generic::BlockId::Hash(x) => Some(*x), - generic::BlockId::Number(num) => + generic::BlockId::Number(num) => { self.chain.read().block_by_number.get(num).and_then(|blocks| { blocks.iter().find(|b| b.1.is_best()).map(|b| b.0.header().hash()) - }), + }) + }, }) } @@ -329,10 +334,12 @@ impl sc_transaction_pool::ChainApi for TestApi { at: &BlockId, ) -> Result::Header>, Self::Error> { Ok(match at { - BlockId::Number(num) => - self.chain.read().block_by_number.get(num).map(|b| b[0].0.header().clone()), - BlockId::Hash(hash) => - self.chain.read().block_by_hash.get(hash).map(|b| b.header().clone()), + BlockId::Number(num) => { + self.chain.read().block_by_number.get(num).map(|b| b[0].0.header().clone()) + }, + BlockId::Hash(hash) => { + self.chain.read().block_by_hash.get(hash).map(|b| b.header().clone()) + }, }) } diff --git a/utils/build-script-utils/src/git.rs b/utils/build-script-utils/src/git.rs index db9a4b291ffdb..a00ad520b1464 100644 --- a/utils/build-script-utils/src/git.rs +++ b/utils/build-script-utils/src/git.rs @@ -33,7 +33,7 @@ pub fn rerun_if_git_head_changed() { Err(err) => { 
eprintln!("cargo:warning=Unable to read the Git repository: {}", err); - return + return; }, Ok(None) => {}, Ok(Some(paths)) => { @@ -41,7 +41,7 @@ pub fn rerun_if_git_head_changed() { println!("cargo:rerun-if-changed={}", p.display()); } - return + return; }, } diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index 76c28d910f943..eaf6983a280e5 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -144,7 +144,7 @@ where { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert) + return Err(Error::Revert); } } @@ -155,7 +155,7 @@ where }; if children.iter().any(|elem| elem.hash == hash) { - return Err(Error::Duplicate) + return Err(Error::Duplicate); } children.push(Node { data, hash, number, children: Default::default() }); @@ -310,7 +310,7 @@ where while root_idx < self.roots.len() { if *number <= self.roots[root_idx].number { root_idx += 1; - continue + continue; } // The second element in the stack tuple tracks what is the **next** children // index to search into. If we find an ancestor then we stop searching into @@ -326,7 +326,7 @@ where is_descendent = true; if predicate(&node.data) { found = true; - break + break; } } } @@ -334,7 +334,7 @@ where // If the element we are looking for is a descendent of the current root // then we can stop the search. if is_descendent { - break + break; } root_idx += 1; } @@ -393,9 +393,9 @@ where let mut is_first = true; for child in root_children { - if is_first && - (child.number == *number && child.hash == *hash || - child.number < *number && is_descendent_of(&child.hash, hash)?) + if is_first + && (child.number == *number && child.hash == *hash + || child.number < *number && is_descendent_of(&child.hash, hash)?) 
{ root.children.push(child); // assuming that the tree is well formed only one child should pass this @@ -447,19 +447,19 @@ where { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert) + return Err(Error::Revert); } } // check if one of the current roots is being finalized if let Some(root) = self.finalize_root(hash) { - return Ok(FinalizationResult::Changed(Some(root))) + return Ok(FinalizationResult::Changed(Some(root))); } // make sure we're not finalizing a descendent of any root for root in self.roots.iter() { if number > root.number && is_descendent_of(&root.hash, hash)? { - return Err(Error::UnfinalizedAncestor) + return Err(Error::UnfinalizedAncestor); } } @@ -501,13 +501,13 @@ where { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert) + return Err(Error::Revert); } } // check if one of the current roots is being finalized if let Some(root) = self.finalize_root(hash) { - return Ok(FinalizationResult::Changed(Some(root))) + return Ok(FinalizationResult::Changed(Some(root))); } // we need to: @@ -522,21 +522,21 @@ where let is_finalized = root.hash == *hash; let is_descendant = !is_finalized && root.number > number && is_descendent_of(hash, &root.hash)?; - let is_ancestor = !is_finalized && - !is_descendant && root.number < number && - is_descendent_of(&root.hash, hash)?; + let is_ancestor = !is_finalized + && !is_descendant && root.number < number + && is_descendent_of(&root.hash, hash)?; (is_finalized, is_descendant, is_ancestor) }; // if we have met finalized root - open it and return if is_finalized { - return Ok(FinalizationResult::Changed(Some(self.finalize_root_at(idx)))) + return Ok(FinalizationResult::Changed(Some(self.finalize_root_at(idx)))); } // if node is descendant of finalized block - just leave it as is if is_descendant { idx += 1; - continue + continue; } // if node is 
ancestor of finalized block - remove it and continue with children @@ -544,7 +544,7 @@ where let root = self.roots.swap_remove(idx); self.roots.extend(root.children); changed = true; - continue + continue; } // if node is neither ancestor, nor descendant of the finalized block - remove it @@ -584,7 +584,7 @@ where { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert) + return Err(Error::Revert); } } @@ -595,14 +595,14 @@ where if predicate(&node.data) && (node.hash == *hash || is_descendent_of(&node.hash, hash)?) { for child in node.children.iter() { - if child.number <= number && - (child.hash == *hash || is_descendent_of(&child.hash, hash)?) + if child.number <= number + && (child.hash == *hash || is_descendent_of(&child.hash, hash)?) { - return Err(Error::UnfinalizedAncestor) + return Err(Error::UnfinalizedAncestor); } } - return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash))) + return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash))); } } @@ -630,7 +630,7 @@ where { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert) + return Err(Error::Revert); } } @@ -642,15 +642,15 @@ where if predicate(&root.data) && (root.hash == *hash || is_descendent_of(&root.hash, hash)?) { for child in root.children.iter() { - if child.number <= number && - (child.hash == *hash || is_descendent_of(&child.hash, hash)?) + if child.number <= number + && (child.hash == *hash || is_descendent_of(&child.hash, hash)?) { - return Err(Error::UnfinalizedAncestor) + return Err(Error::UnfinalizedAncestor); } } position = Some(i); - break + break; } } @@ -670,9 +670,9 @@ where let roots = std::mem::take(&mut self.roots); for root in roots { - let retain = root.number > number && is_descendent_of(hash, &root.hash)? 
|| - root.number == number && root.hash == *hash || - is_descendent_of(&root.hash, hash)?; + let retain = root.number > number && is_descendent_of(hash, &root.hash)? + || root.number == number && root.hash == *hash + || is_descendent_of(&root.hash, hash)?; if retain { self.roots.push(root); @@ -1141,15 +1141,15 @@ mod test { // finalizing "D" will finalize a block from the tree, but it can't be applied yet // since it is not a root change. assert_eq!( - tree.finalizes_any_with_descendent_if(&"D", 10, &is_descendent_of, |c| c.effective == - 10), + tree.finalizes_any_with_descendent_if(&"D", 10, &is_descendent_of, |c| c.effective + == 10), Ok(Some(false)), ); // finalizing "E" is not allowed since there are not finalized anchestors. assert_eq!( - tree.finalizes_any_with_descendent_if(&"E", 15, &is_descendent_of, |c| c.effective == - 10), + tree.finalizes_any_with_descendent_if(&"E", 15, &is_descendent_of, |c| c.effective + == 10), Err(Error::UnfinalizedAncestor) ); @@ -1182,15 +1182,15 @@ mod test { // finalizing "F" will fail since it would finalize past "E" without finalizing "D" first assert_eq!( - tree.finalizes_any_with_descendent_if(&"F", 100, &is_descendent_of, |c| c.effective <= - 100,), + tree.finalizes_any_with_descendent_if(&"F", 100, &is_descendent_of, |c| c.effective + <= 100,), Err(Error::UnfinalizedAncestor), ); // it will work with "G" though since it is not in the same branch as "E" assert_eq!( - tree.finalizes_any_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective <= - 100), + tree.finalizes_any_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective + <= 100), Ok(Some(true)), ); diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index 5a67b11f494f5..c4a4fd0ec6741 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -87,7 +87,7 @@ where /// Benchmark the execution speed of historic blocks and log the 
results. pub fn run(&self) -> Result<()> { if self.params.from == 0 { - return Err("Cannot benchmark the genesis block".into()) + return Err("Cannot benchmark the genesis block".into()); } for i in self.params.from..=self.params.to { diff --git a/utils/frame/benchmarking-cli/src/extrinsic/bench.rs b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs index 2a86c10e7ad27..3cafdfe3aa53f 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/bench.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs @@ -138,7 +138,7 @@ where let ext_builder = if let Some(ext_builder) = ext_builder { ext_builder } else { - return Ok((builder.build()?.block, None)) + return Ok((builder.build()?.block, None)); }; // Put as many extrinsics into the block as possible and count them. @@ -156,7 +156,7 @@ where num_ext += 1; } if num_ext == 0 { - return Err("A Block must hold at least one extrinsic".into()) + return Err("A Block must hold at least one extrinsic".into()); } info!("Extrinsics per block: {}", num_ext); let block = builder.build()?.block; diff --git a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs index b95cd6b5c2e42..548d02e7bf4ec 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs @@ -105,15 +105,16 @@ impl ExtrinsicCmd { list.len(), list.join("\n") ); - return Ok(()) + return Ok(()); } let pallet = self.params.pallet.clone().unwrap_or_default(); let extrinsic = self.params.extrinsic.clone().unwrap_or_default(); let ext_builder = match ext_factory.try_get(&pallet, &extrinsic) { Some(ext_builder) => ext_builder, - None => - return Err("Unknown pallet or extrinsic. Use --list for a complete list.".into()), + None => { + return Err("Unknown pallet or extrinsic. 
Use --list for a complete list.".into()) + }, }; let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data, digest_items); diff --git a/utils/frame/benchmarking-cli/src/machine/mod.rs b/utils/frame/benchmarking-cli/src/machine/mod.rs index 82b4e5be7358e..a87088bd0fb62 100644 --- a/utils/frame/benchmarking-cli/src/machine/mod.rs +++ b/utils/frame/benchmarking-cli/src/machine/mod.rs @@ -210,7 +210,7 @@ impl MachineCmd { /// Validates the CLI arguments. fn validate_args(&self) -> Result<()> { if self.tolerance > 100.0 || self.tolerance < 0.0 { - return Err("The --tolerance argument is out of range".into()) + return Err("The --tolerance argument is out of range".into()); } Ok(()) } diff --git a/utils/frame/benchmarking-cli/src/overhead/template.rs b/utils/frame/benchmarking-cli/src/overhead/template.rs index ceed34d1981f9..5fb64057aa2a4 100644 --- a/utils/frame/benchmarking-cli/src/overhead/template.rs +++ b/utils/frame/benchmarking-cli/src/overhead/template.rs @@ -117,7 +117,7 @@ impl TemplateData { let mut path = weight_out.clone().unwrap_or_else(|| PathBuf::from(".")); if !path.is_dir() { - return Err("Need directory as --weight-path".into()) + return Err("Need directory as --weight-path".into()); } path.push(format!("{}_weights.rs", self.short_name)); Ok(path) diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index 242f0e685290f..a4d40f1cfd66a 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -61,7 +61,7 @@ fn combine_batches( db_batches: Vec, ) -> Vec { if time_batches.is_empty() && db_batches.is_empty() { - return Default::default() + return Default::default(); } let mut all_benchmarks = @@ -121,34 +121,36 @@ impl PalletCmd { { if let Some(output_path) = &self.output { if !output_path.is_dir() && output_path.file_name().is_none() { - return Err("Output file or path is invalid!".into()) + return Err("Output 
file or path is invalid!".into()); } } if let Some(header_file) = &self.header { if !header_file.is_file() { - return Err("Header file is invalid!".into()) + return Err("Header file is invalid!".into()); }; } if let Some(handlebars_template_file) = &self.template { if !handlebars_template_file.is_file() { - return Err("Handlebars template file is invalid!".into()) + return Err("Handlebars template file is invalid!".into()); }; } if let Some(json_input) = &self.json_input { let raw_data = match std::fs::read(json_input) { Ok(raw_data) => raw_data, - Err(error) => - return Err(format!("Failed to read {:?}: {}", json_input, error).into()), + Err(error) => { + return Err(format!("Failed to read {:?}: {}", json_input, error).into()) + }, }; let batches: Vec = match serde_json::from_slice(&raw_data) { Ok(batches) => batches, - Err(error) => - return Err(format!("Failed to deserialize {:?}: {}", json_input, error).into()), + Err(error) => { + return Err(format!("Failed to deserialize {:?}: {}", json_input, error).into()) + }, }; - return self.output_from_results(&batches) + return self.output_from_results(&batches); } let spec = config.chain_spec; @@ -214,9 +216,9 @@ impl PalletCmd { .for_each(|item| { for benchmark in &item.benchmarks { let benchmark_name = &benchmark.name; - if extrinsic.is_empty() || - extrinsic.as_bytes() == &b"*"[..] || - extrinsics.contains(&&benchmark_name[..]) + if extrinsic.is_empty() + || extrinsic.as_bytes() == &b"*"[..] 
+ || extrinsics.contains(&&benchmark_name[..]) { benchmarks_to_run.push(( item.pallet.clone(), @@ -228,13 +230,13 @@ impl PalletCmd { }); if benchmarks_to_run.is_empty() { - return Err("No benchmarks found which match your input.".into()) + return Err("No benchmarks found which match your input.".into()); } if self.list { // List benchmarks instead of running them list_benchmark(benchmarks_to_run); - return Ok(()) + return Ok(()); } // Run the benchmarks @@ -265,7 +267,7 @@ impl PalletCmd { // The slope logic needs at least two points // to compute a slope. if self.steps < 2 { - return Err("`steps` must be at least 2.".into()) + return Err("`steps` must be at least 2.".into()); } let step_size = (diff as f32 / (self.steps - 1) as f32).max(0.0); @@ -499,7 +501,7 @@ impl PalletCmd { fs::write(path, json)?; } else { print!("{json}"); - return Ok(true) + return Ok(true); } } @@ -522,7 +524,7 @@ impl PalletCmd { // Skip raw data + analysis if there are no results if batch.time_results.is_empty() { - continue + continue; } if !self.no_storage_info { diff --git a/utils/frame/benchmarking-cli/src/pallet/writer.rs b/utils/frame/benchmarking-cli/src/pallet/writer.rs index a52bbcd229cb1..76133f009cc19 100644 --- a/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -126,7 +126,7 @@ fn map_results( ) -> Result>, std::io::Error> { // Skip if batches is empty. 
if batches.is_empty() { - return Err(io_error("empty batches")) + return Err(io_error("empty batches")); } let mut all_benchmarks = HashMap::<_, Vec>::new(); @@ -134,7 +134,7 @@ fn map_results( for batch in batches { // Skip if there are no results if batch.time_results.is_empty() { - continue + continue; } let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap(); @@ -405,13 +405,13 @@ pub(crate) fn add_storage_comments( for (key, reads, writes, whitelisted) in &result.keys { // skip keys which are whitelisted if *whitelisted { - continue + continue; } let prefix_length = key.len().min(32); let prefix = key[0..prefix_length].to_vec(); if identified.contains(&prefix) { // skip adding comments for keys we already identified - continue + continue; } else { // track newly identified keys identified.insert(prefix.clone()); @@ -515,7 +515,7 @@ mod test { benchmark: [benchmark.to_vec(), b"_benchmark".to_vec()].concat(), time_results: results.clone(), db_results: results, - } + }; } fn check_data(benchmark: &BenchmarkData, component: &str, base: u128, slope: u128) { diff --git a/utils/frame/benchmarking-cli/src/shared/stats.rs b/utils/frame/benchmarking-cli/src/shared/stats.rs index ffae4a17724f8..e9e436d2a7e21 100644 --- a/utils/frame/benchmarking-cli/src/shared/stats.rs +++ b/utils/frame/benchmarking-cli/src/shared/stats.rs @@ -71,7 +71,7 @@ impl Stats { /// Calculates statistics and returns them. 
pub fn new(xs: &Vec) -> Result { if xs.is_empty() { - return Err("Empty input is invalid".into()) + return Err("Empty input is invalid".into()); } let (avg, stddev) = Self::avg_and_stddev(xs); diff --git a/utils/frame/benchmarking-cli/src/shared/weight_params.rs b/utils/frame/benchmarking-cli/src/shared/weight_params.rs index 030bbfa00d468..d1e9dc959ddd3 100644 --- a/utils/frame/benchmarking-cli/src/shared/weight_params.rs +++ b/utils/frame/benchmarking-cli/src/shared/weight_params.rs @@ -57,7 +57,7 @@ pub struct WeightParams { impl WeightParams { pub fn calc_weight(&self, stat: &Stats) -> Result { if self.weight_mul.is_sign_negative() || !self.weight_mul.is_normal() { - return Err("invalid floating number for `weight_mul`".into()) + return Err("invalid floating number for `weight_mul`".into()); } let s = stat.select(self.weight_metric) as f64; let w = s.mul_add(self.weight_mul, self.weight_add as f64).ceil(); diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index ce2d52e57d641..005a682ba733e 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -178,7 +178,7 @@ impl StorageCmd { if let Some((ChildType::ParentKeyId, storage_key)) = ChildType::from_prefixed_key(&PrefixedStorageKey::new(key)) { - return Some(ChildInfo::new_default(storage_key)) + return Some(ChildInfo::new_default(storage_key)); } None } diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs index 55a7b60d55552..3b40e6662abca 100644 --- a/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/utils/frame/benchmarking-cli/src/storage/write.rs @@ -99,7 +99,7 @@ impl StorageCmd { state_col, None, ) { - break + break; } } @@ -139,7 +139,7 @@ impl StorageCmd { state_col, Some(&info), ) { - break + break; } } @@ -239,7 +239,7 @@ fn check_new_value( db.sanitize_key(&mut k); if db.get(col, &k).is_some() { 
trace!("Benchmark-store key creation: Key collision detected, retry"); - return false + return false; } } } diff --git a/utils/frame/frame-utilities-cli/src/pallet_id.rs b/utils/frame/frame-utilities-cli/src/pallet_id.rs index 2a80e3a3d312d..e625f4cfc779d 100644 --- a/utils/frame/frame-utilities-cli/src/pallet_id.rs +++ b/utils/frame/frame-utilities-cli/src/pallet_id.rs @@ -63,7 +63,7 @@ impl PalletIdCmd { R::AccountId: Ss58Codec, { if self.id.len() != 8 { - return Err("a module id must be a string of 8 characters".into()) + return Err("a module id must be a string of 8 characters".into()); } let password = self.keystore_params.read_password()?; diff --git a/utils/frame/generate-bags/src/lib.rs b/utils/frame/generate-bags/src/lib.rs index 23da131a668d8..c3fef7307e898 100644 --- a/utils/frame/generate-bags/src/lib.rs +++ b/utils/frame/generate-bags/src/lib.rs @@ -94,7 +94,7 @@ fn path_to_header_file() -> Option { for file_name in &["HEADER-APACHE2", "HEADER-GPL3", "HEADER", "file_header.txt"] { let path = workdir.join(file_name); if path.exists() { - return Some(path) + return Some(path); } } None @@ -146,7 +146,7 @@ pub fn thresholds( thresholds.push(successor as VoteWeight); } else { eprintln!("unexpectedly exceeded weight limit; breaking threshold generation loop"); - break + break; } } diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 86cfc767bf3b5..4e6765b73b7f8 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -303,7 +303,7 @@ where if page_len < PAGE as usize { log::debug!(target: LOG_TARGET, "last page received: {}", page_len); - break all_keys + break all_keys; } else { let new_last_key = all_keys.last().expect("all_keys is populated; has .last(); qed"); @@ -578,7 +578,7 @@ where Some((ChildType::ParentKeyId, storage_key)) => storage_key, None => { log::error!(target: LOG_TARGET, "invalid key: {:?}", prefixed_top_key); - return Err("Invalid 
child key") + return Err("Invalid child key"); }, }; @@ -816,7 +816,7 @@ where for (k, v) in top_kv { // skip writing the child root data. if is_default_child_storage_key(k.as_ref()) { - continue + continue; } ext.insert(k.0, v.0); } @@ -956,9 +956,9 @@ mod remote_tests { .into_iter() .map(|d| d.unwrap()) .filter(|p| { - p.path().file_name().unwrap_or_default() == "offline_else_online_works_data" || - p.path().extension().unwrap_or_default() == "top" || - p.path().extension().unwrap_or_default() == "child" + p.path().file_name().unwrap_or_default() == "offline_else_online_works_data" + || p.path().extension().unwrap_or_default() == "top" + || p.path().extension().unwrap_or_default() == "child" }) .collect::>(); assert!(to_delete.len() > 0); @@ -1053,9 +1053,9 @@ mod remote_tests { .into_iter() .map(|d| d.unwrap()) .filter(|p| { - p.path().file_name().unwrap_or_default() == "can_create_top_snapshot_data" || - p.path().extension().unwrap_or_default() == "top" || - p.path().extension().unwrap_or_default() == "child" + p.path().file_name().unwrap_or_default() == "can_create_top_snapshot_data" + || p.path().extension().unwrap_or_default() == "top" + || p.path().extension().unwrap_or_default() == "child" }) .collect::>(); @@ -1094,9 +1094,9 @@ mod remote_tests { .into_iter() .map(|d| d.unwrap()) .filter(|p| { - p.path().file_name().unwrap_or_default() == "can_create_child_snapshot_data" || - p.path().extension().unwrap_or_default() == "top" || - p.path().extension().unwrap_or_default() == "child" + p.path().file_name().unwrap_or_default() == "can_create_child_snapshot_data" + || p.path().extension().unwrap_or_default() == "top" + || p.path().extension().unwrap_or_default() == "child" }) .collect::>(); @@ -1132,9 +1132,9 @@ mod remote_tests { .into_iter() .map(|d| d.unwrap()) .filter(|p| { - p.path().file_name().unwrap_or_default() == "can_fetch_all_data" || - p.path().extension().unwrap_or_default() == "top" || - p.path().extension().unwrap_or_default() == "child" + 
p.path().file_name().unwrap_or_default() == "can_fetch_all_data" + || p.path().extension().unwrap_or_default() == "top" + || p.path().extension().unwrap_or_default() == "child" }) .collect::>(); diff --git a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index ab180c7d45d5b..74f68e9238d5a 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -52,14 +52,15 @@ fn count_migrate<'a, H: Hasher>( for node in iter_node { let node = node.map_err(|e| format!("TrieDB node iterator error: {}", e))?; match node.2.node_plan() { - NodePlan::Leaf { value, .. } | NodePlan::NibbledBranch { value: Some(value), .. } => + NodePlan::Leaf { value, .. } | NodePlan::NibbledBranch { value: Some(value), .. } => { if let ValuePlan::Inline(range) = value { - if (range.end - range.start) as u32 >= - sp_core::storage::TRIE_VALUE_NODE_THRESHOLD + if (range.end - range.start) as u32 + >= sp_core::storage::TRIE_VALUE_NODE_THRESHOLD { nb += 1; } - }, + } + }, _ => (), } } diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index f54354342bf28..b0e5ffe872702 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -132,7 +132,7 @@ //! added, given the right flag: //! //! ```ignore -//! +//! //! #[cfg(feature = try-runtime)] //! fn pre_upgrade() -> Result, &'static str> {} //! 
@@ -504,10 +504,11 @@ impl State { ::Err: Debug, { Ok(match self { - State::Snap { snapshot_path } => + State::Snap { snapshot_path } => { Builder::::new().mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path), - })), + })) + }, State::Live { snapshot_path, pallet, uri, at, child_tree } => { let at = match at { Some(at_str) => Some(hash_of::(at_str)?), @@ -553,34 +554,38 @@ impl TryRuntimeCmd { ExecDispatch: NativeExecutionDispatch + 'static, { match &self.command { - Command::OnRuntimeUpgrade(ref cmd) => + Command::OnRuntimeUpgrade(ref cmd) => { commands::on_runtime_upgrade::on_runtime_upgrade::( self.shared.clone(), cmd.clone(), config, ) - .await, - Command::OffchainWorker(cmd) => + .await + }, + Command::OffchainWorker(cmd) => { commands::offchain_worker::offchain_worker::( self.shared.clone(), cmd.clone(), config, ) - .await, - Command::ExecuteBlock(cmd) => + .await + }, + Command::ExecuteBlock(cmd) => { commands::execute_block::execute_block::( self.shared.clone(), cmd.clone(), config, ) - .await, - Command::FollowChain(cmd) => + .await + }, + Command::FollowChain(cmd) => { commands::follow_chain::follow_chain::( self.shared.clone(), cmd.clone(), config, ) - .await, + .await + }, } } } diff --git a/utils/wasm-builder/src/builder.rs b/utils/wasm-builder/src/builder.rs index 81a8693968188..579b3792d49b5 100644 --- a/utils/wasm-builder/src/builder.rs +++ b/utils/wasm-builder/src/builder.rs @@ -156,7 +156,7 @@ impl WasmBuilder { provide_dummy_wasm_binary_if_not_exist(&file_path); - return + return; } build_project( @@ -186,8 +186,8 @@ fn generate_crate_skip_build_env_name() -> String { /// Checks if the build of the WASM binary should be skipped. 
fn check_skip_build() -> bool { - env::var(crate::SKIP_BUILD_ENV).is_ok() || - env::var(generate_crate_skip_build_env_name()).is_ok() + env::var(crate::SKIP_BUILD_ENV).is_ok() + || env::var(generate_crate_skip_build_env_name()).is_ok() } /// Provide a dummy WASM binary if there doesn't exist one. diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs index fc86a06170a50..adcf45417225b 100644 --- a/utils/wasm-builder/src/lib.rs +++ b/utils/wasm-builder/src/lib.rs @@ -254,8 +254,9 @@ impl CargoCommand { // variable is set, we can assume that whatever rust compiler we have, it is a nightly // compiler. For "more" information, see: // https://github.com/rust-lang/rust/blob/fa0f7d0080d8e7e9eb20aa9cbf8013f96c81287f/src/libsyntax/feature_gate/check.rs#L891 - env::var("RUSTC_BOOTSTRAP").is_ok() || - self.command() + env::var("RUSTC_BOOTSTRAP").is_ok() + || self + .command() .arg("--version") .output() .map_err(|_| ()) diff --git a/utils/wasm-builder/src/prerequisites.rs b/utils/wasm-builder/src/prerequisites.rs index fb04dc3c98fb2..9c37d8279b957 100644 --- a/utils/wasm-builder/src/prerequisites.rs +++ b/utils/wasm-builder/src/prerequisites.rs @@ -38,7 +38,7 @@ pub(crate) fn check() -> Result { let cargo_command = crate::get_nightly_cargo(); if !cargo_command.is_nightly() { - return Err(print_error_message("Rust nightly not installed, please install it!")) + return Err(print_error_message("Rust nightly not installed, please install it!")); } check_wasm_toolchain_installed(cargo_command) @@ -160,8 +160,9 @@ fn check_wasm_toolchain_installed( )) } else { match String::from_utf8(s.stderr) { - Ok(ref err) if err.contains("linker `rust-lld` not found") => - Err(print_error_message("`rust-lld` not found, please install it!")), + Ok(ref err) if err.contains("linker `rust-lld` not found") => { + Err(print_error_message("`rust-lld` not found, please install it!")) + }, Ok(ref err) => Err(format!( "{}\n\n{}\n{}\n{}{}\n", err_msg, diff --git 
a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index 07219676413fc..c0ea5a2641c21 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -194,11 +194,11 @@ fn find_cargo_lock(cargo_manifest: &Path) -> Option { fn find_impl(mut path: PathBuf) -> Option { loop { if path.join("Cargo.lock").exists() { - return Some(path.join("Cargo.lock")) + return Some(path.join("Cargo.lock")); } if !path.pop() { - return None + return None; } } } @@ -207,7 +207,7 @@ fn find_cargo_lock(cargo_manifest: &Path) -> Option { let path = PathBuf::from(workspace); if path.join("Cargo.lock").exists() { - return Some(path.join("Cargo.lock")) + return Some(path.join("Cargo.lock")); } else { build_helper::warning!( "`{}` env variable doesn't point to a directory that contains a `Cargo.lock`.", @@ -217,7 +217,7 @@ fn find_cargo_lock(cargo_manifest: &Path) -> Option { } if let Some(path) = find_impl(build_helper::out_dir()) { - return Some(path) + return Some(path); } build_helper::warning!( @@ -262,10 +262,11 @@ fn get_wasm_workspace_root() -> PathBuf { loop { match out_dir.parent() { Some(parent) if out_dir.ends_with("build") => return parent.to_path_buf(), - _ => + _ => { if !out_dir.pop() { - break - }, + break; + } + }, } } @@ -404,19 +405,20 @@ fn project_enabled_features( // this heuristic anymore. However, for the transition phase between now and namespaced // features already being present in nightly, we need this code to make // runtimes compile with all the possible rustc versions. 
- if v.len() == 1 && - v.get(0).map_or(false, |v| *v == format!("dep:{}", f)) && - std_enabled.as_ref().map(|e| e.iter().any(|ef| ef == *f)).unwrap_or(false) + if v.len() == 1 + && v.get(0).map_or(false, |v| *v == format!("dep:{}", f)) + && std_enabled.as_ref().map(|e| e.iter().any(|ef| ef == *f)).unwrap_or(false) { - return false + return false; } // We don't want to enable the `std`/`default` feature for the wasm build and // we need to check if the feature is enabled by checking the env variable. - *f != "std" && - *f != "default" && env::var(format!("CARGO_FEATURE_{}", feature_env)) - .map(|v| v == "1") - .unwrap_or_default() + *f != "std" + && *f != "default" + && env::var(format!("CARGO_FEATURE_{}", feature_env)) + .map(|v| v == "1") + .unwrap_or_default() }) .map(|d| d.0.clone()) .collect::>(); From 8caecbb98d6af6f554be10675e977593937b1eea Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Mon, 21 Nov 2022 09:19:36 +0200 Subject: [PATCH 025/101] [Uniques V2] Smart attributes (#12702) * Basics * WIP: change the data format * Refactor * Remove redundant new() method * Rename settings * Enable tests * Chore * Change params order * Delete the config on collection removal * Chore * Remove redundant system features * Rename force_item_status to force_collection_status * Update node runtime * Chore * Remove thaw_collection * Chore * Connect collection.is_frozen to config * Allow to lock the collection in a new way * Move free_holding into settings * Connect collection's metadata locker to feature flags * DRY * Chore * Connect pallet level feature flags * Prepare tests for the new changes * Implement Item settings * Allow to lock the metadata or attributes of an item * Common -> Settings * Extract settings related code to a separate file * Move feature flag checks inside the do_* methods * Split settings.rs into parts * Extract repeated code into macro * Extract macros into their own file * Chore * Fix traits * Fix 
traits * Test SystemFeatures * Fix benchmarks * Add missing benchmark * Fix node/runtime/lib.rs * ".git/.scripts/bench-bot.sh" pallet dev pallet_nfts * Keep item's config on burn if it's not empty * Fix the merge artifacts * Fmt * Add SystemFeature::NoSwaps check * Rename SystemFeatures to PalletFeatures * Rename errors * Add docs * Change error message * Change the format of CollectionConfig to store more data * Move max supply to the CollectionConfig and allow to change it * Remove ItemConfig from the mint() function and use the one set in mint settings * Add different mint options * Allow to change the mint settings * Add a force_mint() method * Check mint params * Some optimisations * Cover with tests * Remove merge artifacts * Chore * Use the new has_role() method * Rework item deposits * More tests * Refactoring * Address comments * Refactor lock_collection() * Update frame/nfts/src/types.rs Co-authored-by: Squirrel * Update frame/nfts/src/types.rs Co-authored-by: Squirrel * Update frame/nfts/src/lib.rs Co-authored-by: Squirrel * Update frame/nfts/src/lib.rs Co-authored-by: Squirrel * Private => Issuer * Add more tests * Fix benchmarks * Add benchmarks for new methods * [Uniques v2] Refactoring (#12570) * Move do_set_price() and do_buy_item() to buy_sell.rs * Move approvals to feature file * Move metadata to feature files * Move the rest of methods to feature files * Remove artifacts * Smart attributes * Split force_collection_status into 2 methods * Fix benchmarks * Fix benchmarks * Update deps * Fix merge artifact * Weights + benchmarks + docs * Change params order * Chore * Update frame/nfts/src/lib.rs Co-authored-by: Squirrel * Update frame/nfts/src/lib.rs Co-authored-by: Squirrel * Update docs * Update frame/nfts/src/lib.rs Co-authored-by: Squirrel * Add PalletId * Chore * Add tests * More tests * Add doc * Update errors snapshots * Ensure we track the owner_deposit field correctly Co-authored-by: command-bot <> Co-authored-by: Squirrel --- 
bin/node/runtime/src/lib.rs | 2 + frame/nfts/src/benchmarking.rs | 106 +++- frame/nfts/src/features/attributes.rs | 292 +++++++++-- .../src/features/create_delete_collection.rs | 17 +- frame/nfts/src/features/create_delete_item.rs | 1 + frame/nfts/src/features/metadata.rs | 10 +- frame/nfts/src/features/transfer.rs | 5 +- frame/nfts/src/impl_nonfungibles.rs | 11 +- frame/nfts/src/lib.rs | 143 +++++- frame/nfts/src/mock.rs | 1 + frame/nfts/src/tests.rs | 464 ++++++++++++++++-- frame/nfts/src/types.rs | 33 +- frame/nfts/src/weights.rs | 57 +++ frame/support/src/lib.rs | 4 +- frame/support/src/traits/tokens.rs | 4 +- frame/support/src/traits/tokens/misc.rs | 16 + .../src/traits/tokens/nonfungible_v2.rs | 35 +- .../src/traits/tokens/nonfungibles_v2.rs | 9 +- ...ev_mode_without_arg_max_encoded_len.stderr | 2 +- ...age_ensure_span_are_ok_on_wrong_gen.stderr | 6 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 6 +- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 2 +- 23 files changed, 1083 insertions(+), 145 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index be083efcc0706..5b6cb2ec574d3 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1491,6 +1491,7 @@ parameter_types! 
{ pub const KeyLimit: u32 = 32; pub const ValueLimit: u32 = 256; pub const ApprovalsLimit: u32 = 20; + pub const ItemAttributesApprovalsLimit: u32 = 20; pub const MaxTips: u32 = 10; pub const MaxDeadlineDuration: BlockNumber = 12 * 30 * DAYS; } @@ -1535,6 +1536,7 @@ impl pallet_nfts::Config for Runtime { type KeyLimit = KeyLimit; type ValueLimit = ValueLimit; type ApprovalsLimit = ApprovalsLimit; + type ItemAttributesApprovalsLimit = ItemAttributesApprovalsLimit; type MaxTips = MaxTips; type MaxDeadlineDuration = MaxDeadlineDuration; type Features = Features; diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index 61407abd9f985..5e1b0237ca3ec 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -114,6 +114,7 @@ fn add_item_attribute, I: 'static>( SystemOrigin::Signed(caller.clone()).into(), T::Helper::collection(0), Some(item), + AttributeNamespace::CollectionOwner, key.clone(), vec![0; T::ValueLimit::get() as usize].try_into().unwrap(), )); @@ -338,10 +339,38 @@ benchmarks_instance_pallet! { let (collection, caller, _) = create_collection::(); let (item, ..) = mint_item::(0); - add_item_metadata::(item); - }: _(SystemOrigin::Signed(caller), collection, Some(item), key.clone(), value.clone()) + }: _(SystemOrigin::Signed(caller), collection, Some(item), AttributeNamespace::CollectionOwner, key.clone(), value.clone()) + verify { + assert_last_event::( + Event::AttributeSet { + collection, + maybe_item: Some(item), + namespace: AttributeNamespace::CollectionOwner, + key, + value, + } + .into(), + ); + } + + force_set_attribute { + let key: BoundedVec<_, _> = vec![0u8; T::KeyLimit::get() as usize].try_into().unwrap(); + let value: BoundedVec<_, _> = vec![0u8; T::ValueLimit::get() as usize].try_into().unwrap(); + + let (collection, caller, _) = create_collection::(); + let (item, ..) 
= mint_item::(0); + }: _(SystemOrigin::Root, Some(caller), collection, Some(item), AttributeNamespace::CollectionOwner, key.clone(), value.clone()) verify { - assert_last_event::(Event::AttributeSet { collection, maybe_item: Some(item), key, value }.into()); + assert_last_event::( + Event::AttributeSet { + collection, + maybe_item: Some(item), + namespace: AttributeNamespace::CollectionOwner, + key, + value, + } + .into(), + ); } clear_attribute { @@ -349,9 +378,76 @@ benchmarks_instance_pallet! { let (item, ..) = mint_item::(0); add_item_metadata::(item); let (key, ..) = add_item_attribute::(item); - }: _(SystemOrigin::Signed(caller), collection, Some(item), key.clone()) + }: _(SystemOrigin::Signed(caller), collection, Some(item), AttributeNamespace::CollectionOwner, key.clone()) + verify { + assert_last_event::( + Event::AttributeCleared { + collection, + maybe_item: Some(item), + namespace: AttributeNamespace::CollectionOwner, + key, + }.into(), + ); + } + + approve_item_attributes { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller), collection, item, target_lookup) verify { - assert_last_event::(Event::AttributeCleared { collection, maybe_item: Some(item), key }.into()); + assert_last_event::( + Event::ItemAttributesApprovalAdded { + collection, + item, + delegate: target, + } + .into(), + ); + } + + cancel_item_attributes_approval { + let n in 0 .. 1_000; + + let (collection, caller, _) = create_collection::(); + let (item, ..) 
= mint_item::(0); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + Nfts::::approve_item_attributes( + SystemOrigin::Signed(caller.clone()).into(), + collection, + item, + target_lookup.clone(), + )?; + T::Currency::make_free_balance_be(&target, DepositBalanceOf::::max_value()); + let value: BoundedVec<_, _> = vec![0u8; T::ValueLimit::get() as usize].try_into().unwrap(); + for i in 0..n { + let mut key = vec![0u8; T::KeyLimit::get() as usize]; + let mut s = Vec::from((i as u16).to_be_bytes()); + key.truncate(s.len()); + key.append(&mut s); + + Nfts::::set_attribute( + SystemOrigin::Signed(target.clone()).into(), + T::Helper::collection(0), + Some(item), + AttributeNamespace::Account(target.clone()), + key.try_into().unwrap(), + value.clone(), + )?; + } + let witness = CancelAttributesApprovalWitness { account_attributes: n }; + }: _(SystemOrigin::Signed(caller), collection, item, target_lookup, witness) + verify { + assert_last_event::( + Event::ItemAttributesApprovalRemoved { + collection, + item, + delegate: target, + } + .into(), + ); } set_metadata { diff --git a/frame/nfts/src/features/attributes.rs b/frame/nfts/src/features/attributes.rs index 85c1e0b302d12..0d65a1169323b 100644 --- a/frame/nfts/src/features/attributes.rs +++ b/frame/nfts/src/features/attributes.rs @@ -20,9 +20,10 @@ use frame_support::pallet_prelude::*; impl, I: 'static> Pallet { pub(crate) fn do_set_attribute( - maybe_check_owner: Option, + origin: T::AccountId, collection: T::CollectionId, maybe_item: Option, + namespace: AttributeNamespace, key: BoundedVec, value: BoundedVec, ) -> DispatchResult { @@ -34,90 +35,273 @@ impl, I: 'static> Pallet { let mut collection_details = Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - if let Some(check_owner) = &maybe_check_owner { - ensure!(check_owner == &collection_details.owner, Error::::NoPermission); - } + ensure!( + Self::is_valid_namespace( + &origin, + 
&namespace, + &collection, + &collection_details.owner, + &maybe_item, + )?, + Error::::NoPermission + ); let collection_config = Self::get_collection_config(&collection)?; - match maybe_item { - None => { - ensure!( - collection_config.is_setting_enabled(CollectionSetting::UnlockedAttributes), - Error::::LockedCollectionAttributes - ) - }, - Some(item) => { - let maybe_is_locked = Self::get_item_config(&collection, &item) - .map(|c| c.has_disabled_setting(ItemSetting::UnlockedAttributes))?; - ensure!(!maybe_is_locked, Error::::LockedItemAttributes); + // for the `CollectionOwner` namespace we need to check if the collection/item is not locked + match namespace { + AttributeNamespace::CollectionOwner => match maybe_item { + None => { + ensure!( + collection_config.is_setting_enabled(CollectionSetting::UnlockedAttributes), + Error::::LockedCollectionAttributes + ) + }, + Some(item) => { + let maybe_is_locked = Self::get_item_config(&collection, &item) + .map(|c| c.has_disabled_setting(ItemSetting::UnlockedAttributes))?; + ensure!(!maybe_is_locked, Error::::LockedItemAttributes); + }, }, - }; + _ => (), + } - let attribute = Attribute::::get((collection, maybe_item, &key)); + let attribute = Attribute::::get((collection, maybe_item, &namespace, &key)); if attribute.is_none() { collection_details.attributes.saturating_inc(); } - let old_deposit = attribute.map_or(Zero::zero(), |m| m.1); - collection_details.total_deposit.saturating_reduce(old_deposit); + + let old_deposit = + attribute.map_or(AttributeDeposit { account: None, amount: Zero::zero() }, |m| m.1); + let mut deposit = Zero::zero(); - if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) && - maybe_check_owner.is_some() - { + if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) { deposit = T::DepositPerByte::get() .saturating_mul(((key.len() + value.len()) as u32).into()) .saturating_add(T::AttributeDepositBase::get()); } - 
collection_details.total_deposit.saturating_accrue(deposit); - if deposit > old_deposit { - T::Currency::reserve(&collection_details.owner, deposit - old_deposit)?; - } else if deposit < old_deposit { - T::Currency::unreserve(&collection_details.owner, old_deposit - deposit); + + // NOTE: when we transfer an item, we don't move attributes in the ItemOwner namespace. + // When the new owner updates the same attribute, we will update the depositor record + // and return the deposit to the previous owner. + if old_deposit.account.is_some() && old_deposit.account != Some(origin.clone()) { + T::Currency::unreserve(&old_deposit.account.unwrap(), old_deposit.amount); + T::Currency::reserve(&origin, deposit)?; + } else if deposit > old_deposit.amount { + T::Currency::reserve(&origin, deposit - old_deposit.amount)?; + } else if deposit < old_deposit.amount { + T::Currency::unreserve(&origin, old_deposit.amount - deposit); } - Attribute::::insert((&collection, maybe_item, &key), (&value, deposit)); + // NOTE: we don't track the depositor in the CollectionOwner namespace as it's always a + // collection's owner. This simplifies the collection's transfer to another owner. 
+ let deposit_owner = match namespace { + AttributeNamespace::CollectionOwner => { + collection_details.owner_deposit.saturating_accrue(deposit); + collection_details.owner_deposit.saturating_reduce(old_deposit.amount); + None + }, + _ => Some(origin), + }; + + Attribute::::insert( + (&collection, maybe_item, &namespace, &key), + (&value, AttributeDeposit { account: deposit_owner, amount: deposit }), + ); Collection::::insert(collection, &collection_details); - Self::deposit_event(Event::AttributeSet { collection, maybe_item, key, value }); + Self::deposit_event(Event::AttributeSet { collection, maybe_item, key, value, namespace }); Ok(()) } - pub(crate) fn do_clear_attribute( - maybe_check_owner: Option, + pub(crate) fn do_force_set_attribute( + set_as: Option, collection: T::CollectionId, maybe_item: Option, + namespace: AttributeNamespace, key: BoundedVec, + value: BoundedVec, ) -> DispatchResult { let mut collection_details = Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; - if let Some(check_owner) = &maybe_check_owner { - ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + + let attribute = Attribute::::get((collection, maybe_item, &namespace, &key)); + if let Some((_, deposit)) = attribute { + if deposit.account != set_as && deposit.amount != Zero::zero() { + if let Some(deposit_account) = deposit.account { + T::Currency::unreserve(&deposit_account, deposit.amount); + } + } + } else { + collection_details.attributes.saturating_inc(); } - if maybe_check_owner.is_some() { - match maybe_item { - None => { - let collection_config = Self::get_collection_config(&collection)?; + Attribute::::insert( + (&collection, maybe_item, &namespace, &key), + (&value, AttributeDeposit { account: set_as, amount: Zero::zero() }), + ); + Collection::::insert(collection, &collection_details); + Self::deposit_event(Event::AttributeSet { collection, maybe_item, key, value, namespace }); + Ok(()) + } + + pub(crate) fn do_clear_attribute( + 
maybe_check_owner: Option, + collection: T::CollectionId, + maybe_item: Option, + namespace: AttributeNamespace, + key: BoundedVec, + ) -> DispatchResult { + if let Some((_, deposit)) = + Attribute::::take((collection, maybe_item, &namespace, &key)) + { + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + + if let Some(check_owner) = &maybe_check_owner { + if deposit.account != maybe_check_owner { ensure!( - collection_config.is_setting_enabled(CollectionSetting::UnlockedAttributes), - Error::::LockedCollectionAttributes - ) - }, - Some(item) => { - // NOTE: if the item was previously burned, the ItemConfigOf record might - // not exist. In that case, we allow to clear the attribute. - let maybe_is_locked = Self::get_item_config(&collection, &item) - .map_or(false, |c| c.has_disabled_setting(ItemSetting::UnlockedAttributes)); - ensure!(!maybe_is_locked, Error::::LockedItemAttributes); - }, - }; - } + Self::is_valid_namespace( + &check_owner, + &namespace, + &collection, + &collection_details.owner, + &maybe_item, + )?, + Error::::NoPermission + ); + } + + // can't clear `CollectionOwner` type attributes if the collection/item is locked + match namespace { + AttributeNamespace::CollectionOwner => match maybe_item { + None => { + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config + .is_setting_enabled(CollectionSetting::UnlockedAttributes), + Error::::LockedCollectionAttributes + ) + }, + Some(item) => { + // NOTE: if the item was previously burned, the ItemConfigOf record + // might not exist. In that case, we allow to clear the attribute. 
+ let maybe_is_locked = Self::get_item_config(&collection, &item) + .map_or(false, |c| { + c.has_disabled_setting(ItemSetting::UnlockedAttributes) + }); + ensure!(!maybe_is_locked, Error::::LockedItemAttributes); + }, + }, + _ => (), + }; + } - if let Some((_, deposit)) = Attribute::::take((collection, maybe_item, &key)) { collection_details.attributes.saturating_dec(); - collection_details.total_deposit.saturating_reduce(deposit); - T::Currency::unreserve(&collection_details.owner, deposit); + match namespace { + AttributeNamespace::CollectionOwner => { + collection_details.owner_deposit.saturating_reduce(deposit.amount); + T::Currency::unreserve(&collection_details.owner, deposit.amount); + }, + _ => (), + }; + if let Some(deposit_account) = deposit.account { + T::Currency::unreserve(&deposit_account, deposit.amount); + } Collection::::insert(collection, &collection_details); - Self::deposit_event(Event::AttributeCleared { collection, maybe_item, key }); + Self::deposit_event(Event::AttributeCleared { collection, maybe_item, key, namespace }); } Ok(()) } + + pub(crate) fn do_approve_item_attributes( + check_origin: T::AccountId, + collection: T::CollectionId, + item: T::ItemId, + delegate: T::AccountId, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Attributes), + Error::::MethodDisabled + ); + + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + ensure!(check_origin == details.owner, Error::::NoPermission); + + ItemAttributesApprovalsOf::::try_mutate(collection, item, |approvals| { + approvals + .try_insert(delegate.clone()) + .map_err(|_| Error::::ReachedApprovalLimit)?; + + Self::deposit_event(Event::ItemAttributesApprovalAdded { collection, item, delegate }); + Ok(()) + }) + } + + pub(crate) fn do_cancel_item_attributes_approval( + check_origin: T::AccountId, + collection: T::CollectionId, + item: T::ItemId, + delegate: T::AccountId, + witness: CancelAttributesApprovalWitness, + ) -> 
DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Attributes), + Error::::MethodDisabled + ); + + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + ensure!(check_origin == details.owner, Error::::NoPermission); + + ItemAttributesApprovalsOf::::try_mutate(collection, item, |approvals| { + approvals.remove(&delegate); + + let mut attributes: u32 = 0; + let mut deposited: DepositBalanceOf = Zero::zero(); + for (_, (_, deposit)) in Attribute::::drain_prefix(( + &collection, + Some(item), + AttributeNamespace::Account(delegate.clone()), + )) { + attributes.saturating_inc(); + deposited = deposited.saturating_add(deposit.amount); + } + ensure!(attributes <= witness.account_attributes, Error::::BadWitness); + + if !deposited.is_zero() { + T::Currency::unreserve(&delegate, deposited); + } + + Self::deposit_event(Event::ItemAttributesApprovalRemoved { + collection, + item, + delegate, + }); + Ok(()) + }) + } + + fn is_valid_namespace( + origin: &T::AccountId, + namespace: &AttributeNamespace, + collection: &T::CollectionId, + collection_owner: &T::AccountId, + maybe_item: &Option, + ) -> Result { + let mut result = false; + match namespace { + AttributeNamespace::CollectionOwner => result = origin == collection_owner, + AttributeNamespace::ItemOwner => + if let Some(item) = maybe_item { + let item_details = + Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + result = origin == &item_details.owner + }, + AttributeNamespace::Account(account_id) => + if let Some(item) = maybe_item { + let approvals = ItemAttributesApprovalsOf::::get(&collection, &item); + result = account_id == origin && approvals.contains(&origin) + }, + _ => (), + }; + Ok(result) + } } diff --git a/frame/nfts/src/features/create_delete_collection.rs b/frame/nfts/src/features/create_delete_collection.rs index b9530e88b18cd..86625bf49efb2 100644 --- a/frame/nfts/src/features/create_delete_collection.rs +++ 
b/frame/nfts/src/features/create_delete_collection.rs @@ -35,7 +35,7 @@ impl, I: 'static> Pallet { collection, CollectionDetails { owner: owner.clone(), - total_deposit: deposit, + owner_deposit: deposit, items: 0, item_metadatas: 0, attributes: 0, @@ -90,12 +90,21 @@ impl, I: 'static> Pallet { PendingSwapOf::::remove_prefix(&collection, None); CollectionMetadataOf::::remove(&collection); Self::clear_roles(&collection)?; - #[allow(deprecated)] - Attribute::::remove_prefix((&collection,), None); + + for (_, (_, deposit)) in Attribute::::drain_prefix((&collection,)) { + if !deposit.amount.is_zero() { + if let Some(account) = deposit.account { + T::Currency::unreserve(&account, deposit.amount); + } + } + } + CollectionAccount::::remove(&collection_details.owner, &collection); - T::Currency::unreserve(&collection_details.owner, collection_details.total_deposit); + T::Currency::unreserve(&collection_details.owner, collection_details.owner_deposit); CollectionConfigOf::::remove(&collection); let _ = ItemConfigOf::::clear_prefix(&collection, witness.items, None); + let _ = + ItemAttributesApprovalsOf::::clear_prefix(&collection, witness.items, None); Self::deposit_event(Event::Destroyed { collection }); diff --git a/frame/nfts/src/features/create_delete_item.rs b/frame/nfts/src/features/create_delete_item.rs index 10670f4b10c1c..bae1d02c8ad6b 100644 --- a/frame/nfts/src/features/create_delete_item.rs +++ b/frame/nfts/src/features/create_delete_item.rs @@ -109,6 +109,7 @@ impl, I: 'static> Pallet { Account::::remove((&owner, &collection, &item)); ItemPriceOf::::remove(&collection, &item); PendingSwapOf::::remove(&collection, &item); + ItemAttributesApprovalsOf::::remove(&collection, &item); // NOTE: if item's settings are not empty (e.g. 
item's metadata is locked) // then we keep the record and don't remove it diff --git a/frame/nfts/src/features/metadata.rs b/frame/nfts/src/features/metadata.rs index 0b0a337197d9b..3a12dbe64f2f4 100644 --- a/frame/nfts/src/features/metadata.rs +++ b/frame/nfts/src/features/metadata.rs @@ -46,7 +46,7 @@ impl, I: 'static> Pallet { collection_details.item_metadatas.saturating_inc(); } let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); - collection_details.total_deposit.saturating_reduce(old_deposit); + collection_details.owner_deposit.saturating_reduce(old_deposit); let mut deposit = Zero::zero(); if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) && maybe_check_owner.is_some() @@ -60,7 +60,7 @@ impl, I: 'static> Pallet { } else if deposit < old_deposit { T::Currency::unreserve(&collection_details.owner, old_deposit - deposit); } - collection_details.total_deposit.saturating_accrue(deposit); + collection_details.owner_deposit.saturating_accrue(deposit); *metadata = Some(ItemMetadata { deposit, data: data.clone() }); @@ -93,7 +93,7 @@ impl, I: 'static> Pallet { } let deposit = metadata.take().ok_or(Error::::UnknownItem)?.deposit; T::Currency::unreserve(&collection_details.owner, deposit); - collection_details.total_deposit.saturating_reduce(deposit); + collection_details.owner_deposit.saturating_reduce(deposit); Collection::::insert(&collection, &collection_details); Self::deposit_event(Event::MetadataCleared { collection, item }); @@ -121,7 +121,7 @@ impl, I: 'static> Pallet { CollectionMetadataOf::::try_mutate_exists(collection, |metadata| { let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); - details.total_deposit.saturating_reduce(old_deposit); + details.owner_deposit.saturating_reduce(old_deposit); let mut deposit = Zero::zero(); if maybe_check_owner.is_some() && collection_config.is_setting_enabled(CollectionSetting::DepositRequired) @@ -135,7 +135,7 @@ impl, I: 'static> Pallet { } else if deposit 
< old_deposit { T::Currency::unreserve(&details.owner, old_deposit - deposit); } - details.total_deposit.saturating_accrue(deposit); + details.owner_deposit.saturating_accrue(deposit); Collection::::insert(&collection, details); diff --git a/frame/nfts/src/features/transfer.rs b/frame/nfts/src/features/transfer.rs index 7ebad853902a9..7d6ae3553a361 100644 --- a/frame/nfts/src/features/transfer.rs +++ b/frame/nfts/src/features/transfer.rs @@ -100,11 +100,12 @@ impl, I: 'static> Pallet { T::Currency::repatriate_reserved( &details.owner, &owner, - details.total_deposit, + details.owner_deposit, Reserved, )?; CollectionAccount::::remove(&details.owner, &collection); CollectionAccount::::insert(&owner, &collection, ()); + details.owner = owner.clone(); OwnershipAcceptance::::remove(&owner); @@ -150,7 +151,7 @@ impl, I: 'static> Pallet { T::Currency::repatriate_reserved( &details.owner, &owner, - details.total_deposit, + details.owner_deposit, Reserved, )?; diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index b42147e6687d9..a9e05a6f41ce9 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -49,6 +49,7 @@ impl, I: 'static> Inspect<::AccountId> for Palle fn attribute( collection: &Self::CollectionId, item: &Self::ItemId, + namespace: &AttributeNamespace<::AccountId>, key: &[u8], ) -> Option> { if key.is_empty() { @@ -56,7 +57,7 @@ impl, I: 'static> Inspect<::AccountId> for Palle ItemMetadataOf::::get(collection, item).map(|m| m.data.into()) } else { let key = BoundedSlice::<_, _>::try_from(key).ok()?; - Attribute::::get((collection, Some(item), key)).map(|a| a.0.into()) + Attribute::::get((collection, Some(item), namespace, key)).map(|a| a.0.into()) } } @@ -71,7 +72,13 @@ impl, I: 'static> Inspect<::AccountId> for Palle CollectionMetadataOf::::get(collection).map(|m| m.data.into()) } else { let key = BoundedSlice::<_, _>::try_from(key).ok()?; - Attribute::::get((collection, Option::::None, 
key)).map(|a| a.0.into()) + Attribute::::get(( + collection, + Option::::None, + AttributeNamespace::CollectionOwner, + key, + )) + .map(|a| a.0.into()) } } diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 0f3d3c89c2932..8de9f3103e7c2 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -44,11 +44,10 @@ pub mod macros; pub mod weights; use codec::{Decode, Encode}; -use frame_support::{ - traits::{ - tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency, - }, - BoundedBTreeMap, +use frame_support::traits::{ + tokens::{AttributeNamespace, Locker}, + BalanceStatus::Reserved, + Currency, EnsureOriginWithArg, ReservableCurrency, }; use frame_system::Config as SystemConfig; use sp_runtime::{ @@ -156,6 +155,10 @@ pub mod pallet { #[pallet::constant] type ApprovalsLimit: Get; + /// The maximum attributes approvals an item could have. + #[pallet::constant] + type ItemAttributesApprovalsLimit: Get; + /// The max number of tips a user could send. #[pallet::constant] type MaxTips: Get; @@ -271,9 +274,10 @@ pub mod pallet { ( NMapKey, NMapKey>, + NMapKey>, NMapKey>, ), - (BoundedVec, DepositBalanceOf), + (BoundedVec, AttributeDepositOf), OptionQuery, >; @@ -289,6 +293,18 @@ pub mod pallet { OptionQuery, >; + /// Item attribute approvals. + #[pallet::storage] + pub(super) type ItemAttributesApprovalsOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + ItemAttributesApprovals, + ValueQuery, + >; + /// Stores the `CollectionId` that is going to be used for the next collection. /// This gets incremented by 1 whenever a new collection is created. #[pallet::storage] @@ -412,12 +428,26 @@ pub mod pallet { maybe_item: Option, key: BoundedVec, value: BoundedVec, + namespace: AttributeNamespace, }, /// Attribute metadata has been cleared for a `collection` or `item`. 
AttributeCleared { collection: T::CollectionId, maybe_item: Option, key: BoundedVec, + namespace: AttributeNamespace, + }, + /// A new approval to modify item attributes was added. + ItemAttributesApprovalAdded { + collection: T::CollectionId, + item: T::ItemId, + delegate: T::AccountId, + }, + /// A new approval to modify item attributes was removed. + ItemAttributesApprovalRemoved { + collection: T::CollectionId, + item: T::ItemId, + delegate: T::AccountId, }, /// Ownership acceptance has changed for an account. OwnershipAcceptanceChanged { who: T::AccountId, maybe_collection: Option }, @@ -867,7 +897,7 @@ pub mod pallet { /// /// Origin must be Signed and the sender should be the Owner of the `collection`. /// - /// - `collection`: The collection to be frozen. + /// - `collection`: The collection of the items to be reevaluated. /// - `items`: The items of the collection whose deposits will be reevaluated. /// /// NOTE: This exists as a best-effort function. Any items which are unknown or @@ -1208,15 +1238,20 @@ pub mod pallet { /// Set an attribute for a collection or item. /// - /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the - /// `collection`. + /// Origin must be Signed and must conform to the namespace ruleset: + /// - `CollectionOwner` namespace could be modified by the `collection` owner only; + /// - `ItemOwner` namespace could be modified by the `maybe_item` owner only. `maybe_item` + /// should be set in that case; + /// - `Account(AccountId)` namespace could be modified only when the `origin` was given a + /// permission to do so; /// - /// If the origin is Signed, then funds of signer are reserved according to the formula: - /// `MetadataDepositBase + DepositPerByte * (key.len + value.len)` taking into + /// The funds of `origin` are reserved according to the formula: + /// `AttributeDepositBase + DepositPerByte * (key.len + value.len)` taking into /// account any already reserved funds. 
/// /// - `collection`: The identifier of the collection whose item's metadata to set. /// - `maybe_item`: The identifier of the item whose metadata to set. + /// - `namespace`: Attribute's namespace. /// - `key`: The key of the attribute. /// - `value`: The value to which to set the attribute. /// @@ -1228,13 +1263,43 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, maybe_item: Option, + namespace: AttributeNamespace, key: BoundedVec, value: BoundedVec, ) -> DispatchResult { - let maybe_check_owner = T::ForceOrigin::try_origin(origin) - .map(|_| None) - .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; - Self::do_set_attribute(maybe_check_owner, collection, maybe_item, key, value) + let origin = ensure_signed(origin)?; + Self::do_set_attribute(origin, collection, maybe_item, namespace, key, value) + } + + /// Force-set an attribute for a collection or item. + /// + /// Origin must be `ForceOrigin`. + /// + /// If the attribute already exists and it was set by another account, the deposit + /// will be returned to the previous owner. + /// + /// - `set_as`: An optional owner of the attribute. + /// - `collection`: The identifier of the collection whose item's metadata to set. + /// - `maybe_item`: The identifier of the item whose metadata to set. + /// - `namespace`: Attribute's namespace. + /// - `key`: The key of the attribute. + /// - `value`: The value to which to set the attribute. + /// + /// Emits `AttributeSet`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_set_attribute())] + pub fn force_set_attribute( + origin: OriginFor, + set_as: Option, + collection: T::CollectionId, + maybe_item: Option, + namespace: AttributeNamespace, + key: BoundedVec, + value: BoundedVec, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + Self::do_force_set_attribute(set_as, collection, maybe_item, namespace, key, value) } /// Clear an attribute for a collection or item. 
@@ -1246,6 +1311,7 @@ pub mod pallet { /// /// - `collection`: The identifier of the collection whose item's metadata to clear. /// - `maybe_item`: The identifier of the item whose metadata to clear. + /// - `namespace`: Attribute's namespace. /// - `key`: The key of the attribute. /// /// Emits `AttributeCleared`. @@ -1256,12 +1322,57 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, maybe_item: Option, + namespace: AttributeNamespace, key: BoundedVec, ) -> DispatchResult { let maybe_check_owner = T::ForceOrigin::try_origin(origin) .map(|_| None) .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; - Self::do_clear_attribute(maybe_check_owner, collection, maybe_item, key) + Self::do_clear_attribute(maybe_check_owner, collection, maybe_item, namespace, key) + } + + /// Approve item's attributes to be changed by a delegated third-party account. + /// + /// Origin must be Signed and must be an owner of the `item`. + /// + /// - `collection`: A collection of the item. + /// - `item`: The item that holds attributes. + /// - `delegate`: The account to delegate permission to change attributes of the item. + /// + /// Emits `ItemAttributesApprovalAdded` on success. + #[pallet::weight(T::WeightInfo::approve_item_attributes())] + pub fn approve_item_attributes( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + delegate: AccountIdLookupOf, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; + Self::do_approve_item_attributes(origin, collection, item, delegate) + } + + /// Cancel the previously provided approval to change item's attributes. + /// All the previously set attributes by the `delegate` will be removed. + /// + /// Origin must be Signed and must be an owner of the `item`. + /// + /// - `collection`: Collection that the item is contained within. + /// - `item`: The item that holds attributes. 
+ /// - `delegate`: The previously approved account to remove. + /// + /// Emits `ItemAttributesApprovalRemoved` on success. + #[pallet::weight(T::WeightInfo::cancel_item_attributes_approval())] + pub fn cancel_item_attributes_approval( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + delegate: AccountIdLookupOf, + witness: CancelAttributesApprovalWitness, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; + Self::do_cancel_item_attributes_approval(origin, collection, item, delegate, witness) } /// Set the metadata for an item. diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs index bbd1625710500..f814b209d5f78 100644 --- a/frame/nfts/src/mock.rs +++ b/frame/nfts/src/mock.rs @@ -105,6 +105,7 @@ impl Config for Test { type KeyLimit = ConstU32<50>; type ValueLimit = ConstU32<50>; type ApprovalsLimit = ConstU32<10>; + type ItemAttributesApprovalsLimit = ConstU32<2>; type MaxTips = ConstU32<10>; type MaxDeadlineDuration = ConstU64<10000>; type Features = Features; diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index b58c81b1d70f8..1e057a8b58d6d 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -25,6 +25,7 @@ use frame_support::{ traits::{tokens::nonfungibles_v2::Destroy, Currency, Get}, }; use pallet_balances::Error as BalancesError; +use sp_core::bounded::BoundedVec; use sp_std::prelude::*; fn items() -> Vec<(u64, u32, u32)> { @@ -67,11 +68,12 @@ macro_rules! 
bvec { } } -fn attributes(collection: u32) -> Vec<(Option, Vec, Vec)> { +fn attributes(collection: u32) -> Vec<(Option, AttributeNamespace, Vec, Vec)> { let mut s: Vec<_> = Attribute::::iter_prefix((collection,)) - .map(|(k, v)| (k.0, k.1.into(), v.0.into())) + .map(|(k, v)| (k.0, k.1, k.2.into(), v.0.into())) .collect(); - s.sort(); + s.sort_by_key(|k: &(Option, AttributeNamespace, Vec, Vec)| k.0); + s.sort_by_key(|k: &(Option, AttributeNamespace, Vec, Vec)| k.2.clone()); s } @@ -81,6 +83,12 @@ fn approvals(collection_id: u32, item_id: u32) -> Vec<(u64, Option)> { s } +fn item_attributes_approvals(collection_id: u32, item_id: u32) -> Vec { + let approvals = ItemAttributesApprovalsOf::::get(collection_id, item_id); + let s: Vec<_> = approvals.into_iter().collect(); + s +} + fn events() -> Vec> { let result = System::events() .into_iter() @@ -583,7 +591,7 @@ fn set_item_metadata_should_work() { } #[test] -fn set_attribute_should_work() { +fn set_collection_owner_attributes_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); @@ -594,34 +602,73 @@ fn set_attribute_should_work() { )); assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, None)); - assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0])); - assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![0])); - assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![1], bvec![0])); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + None, + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![1], + bvec![0], + )); assert_eq!( attributes(0), vec![ - (None, bvec![0], bvec![0]), - (Some(0), bvec![0], 
bvec![0]), - (Some(0), bvec![1], bvec![0]), + (None, AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + (Some(0), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + (Some(0), AttributeNamespace::CollectionOwner, bvec![1], bvec![0]), ] ); assert_eq!(Balances::reserved_balance(1), 10); + assert_eq!(Collection::::get(0).unwrap().owner_deposit, 9); - assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0; 10])); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + None, + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0; 10], + )); assert_eq!( attributes(0), vec![ - (None, bvec![0], bvec![0; 10]), - (Some(0), bvec![0], bvec![0]), - (Some(0), bvec![1], bvec![0]), + (None, AttributeNamespace::CollectionOwner, bvec![0], bvec![0; 10]), + (Some(0), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + (Some(0), AttributeNamespace::CollectionOwner, bvec![1], bvec![0]), ] ); assert_eq!(Balances::reserved_balance(1), 19); + assert_eq!(Collection::::get(0).unwrap().owner_deposit, 18); - assert_ok!(Nfts::clear_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![1])); + assert_ok!(Nfts::clear_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![1], + )); assert_eq!( attributes(0), - vec![(None, bvec![0], bvec![0; 10]), (Some(0), bvec![0], bvec![0]),] + vec![ + (None, AttributeNamespace::CollectionOwner, bvec![0], bvec![0; 10]), + (Some(0), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + ] ); assert_eq!(Balances::reserved_balance(1), 16); @@ -633,27 +680,301 @@ fn set_attribute_should_work() { } #[test] -fn set_attribute_should_respect_lock() { +fn set_item_owner_attributes_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + Balances::make_free_balance_be(&3, 100); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + 
collection_config_with_all_settings_enabled() + )); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 0, 2, default_item_config())); + + // can't set for the collection + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + None, + AttributeNamespace::ItemOwner, + bvec![0], + bvec![0], + ), + Error::::NoPermission, + ); + // can't set for the non-owned item + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![0], + bvec![0], + ), + Error::::NoPermission, + ); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![1], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![2], + bvec![0], + )); + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::ItemOwner, bvec![0], bvec![0]), + (Some(0), AttributeNamespace::ItemOwner, bvec![1], bvec![0]), + (Some(0), AttributeNamespace::ItemOwner, bvec![2], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(2), 9); + + // validate an attribute can be updated + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![0], + bvec![0; 10], + )); + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::ItemOwner, bvec![0], bvec![0; 10]), + (Some(0), AttributeNamespace::ItemOwner, bvec![1], bvec![0]), + (Some(0), AttributeNamespace::ItemOwner, bvec![2], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(2), 18); + + // validate only item's owner (or the root) can remove an attribute + assert_noop!( + Nfts::clear_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![1], + ), + Error::::NoPermission, + ); + 
assert_ok!(Nfts::clear_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![1], + )); + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::ItemOwner, bvec![0], bvec![0; 10]), + (Some(0), AttributeNamespace::ItemOwner, bvec![2], bvec![0]) + ] + ); + assert_eq!(Balances::reserved_balance(2), 15); + + // transfer item + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(2), 0, 0, 3)); + + // validate the attribute are still here & the deposit belongs to the previous owner + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::ItemOwner, bvec![0], bvec![0; 10]), + (Some(0), AttributeNamespace::ItemOwner, bvec![2], bvec![0]) + ] + ); + let key: BoundedVec<_, _> = bvec![0]; + let (_, deposit) = + Attribute::::get((0, Some(0), AttributeNamespace::ItemOwner, &key)).unwrap(); + assert_eq!(deposit.account, Some(2)); + assert_eq!(deposit.amount, 12); + + // on attribute update the deposit should be returned to the previous owner + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(3), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![0], + bvec![0; 11], + )); + let (_, deposit) = + Attribute::::get((0, Some(0), AttributeNamespace::ItemOwner, &key)).unwrap(); + assert_eq!(deposit.account, Some(3)); + assert_eq!(deposit.amount, 13); + assert_eq!(Balances::reserved_balance(2), 3); + assert_eq!(Balances::reserved_balance(3), 13); + + // validate attributes on item deletion + assert_ok!(Nfts::burn(RuntimeOrigin::signed(3), 0, 0, None)); + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::ItemOwner, bvec![0], bvec![0; 11]), + (Some(0), AttributeNamespace::ItemOwner, bvec![2], bvec![0]) + ] + ); + assert_ok!(Nfts::clear_attribute( + RuntimeOrigin::signed(3), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![0], + )); + assert_ok!(Nfts::clear_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![2], + )); + 
assert_eq!(Balances::reserved_balance(2), 0); + assert_eq!(Balances::reserved_balance(3), 0); + }); +} + +#[test] +fn set_external_account_attributes_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); assert_ok!(Nfts::force_create( RuntimeOrigin::root(), 1, collection_config_with_all_settings_enabled() )); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 0, 1, default_item_config())); + assert_ok!(Nfts::approve_item_attributes(RuntimeOrigin::signed(1), 0, 0, 2)); + + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::Account(1), + bvec![0], + bvec![0], + ), + Error::::NoPermission, + ); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::Account(2), + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::Account(2), + bvec![1], + bvec![0], + )); + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::Account(2), bvec![0], bvec![0]), + (Some(0), AttributeNamespace::Account(2), bvec![1], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(2), 6); + + // remove permission to set attributes + assert_ok!(Nfts::cancel_item_attributes_approval( + RuntimeOrigin::signed(1), + 0, + 0, + 2, + CancelAttributesApprovalWitness { account_attributes: 2 }, + )); + assert_eq!(attributes(0), vec![]); + assert_eq!(Balances::reserved_balance(2), 0); + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::Account(2), + bvec![0], + bvec![0], + ), + Error::::NoPermission, + ); + }); +} + +#[test] +fn set_attribute_should_respect_lock() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + collection_config_with_all_settings_enabled(), + )); 
assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, None)); assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, None)); - assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0])); - assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![0])); - assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(1), bvec![0], bvec![0])); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + None, + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(1), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + )); assert_eq!( attributes(0), vec![ - (None, bvec![0], bvec![0]), - (Some(0), bvec![0], bvec![0]), - (Some(1), bvec![0], bvec![0]), + (None, AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + (Some(0), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + (Some(1), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), ] ); assert_eq!(Balances::reserved_balance(1), 11); @@ -666,16 +987,47 @@ fn set_attribute_should_respect_lock() { )); let e = Error::::LockedCollectionAttributes; - assert_noop!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0]), e); - assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![1])); + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + None, + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + ), + e + ); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![1], + )); assert_ok!(Nfts::lock_item_properties(RuntimeOrigin::signed(1), 0, 0, false, true)); let e = Error::::LockedItemAttributes; assert_noop!( - 
Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![1]), + Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![1], + ), e ); - assert_ok!(Nfts::set_attribute(RuntimeOrigin::signed(1), 0, Some(1), bvec![0], bvec![1])); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(1), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![1], + )); }); } @@ -1905,8 +2257,9 @@ fn pallet_level_feature_flags_should_work() { RuntimeOrigin::signed(user_id), collection_id, None, + AttributeNamespace::CollectionOwner, + bvec![0], bvec![0], - bvec![0] ), Error::::MethodDisabled ); @@ -1942,3 +2295,58 @@ fn group_roles_by_account_should_work() { assert_eq!(account_to_role, expect); }) } + +#[test] +fn add_remove_item_attributes_approval_should_work() { + new_test_ext().execute_with(|| { + let user_1 = 1; + let user_2 = 2; + let user_3 = 3; + let user_4 = 4; + let collection_id = 0; + let item_id = 0; + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_id, None)); + assert_ok!(Nfts::approve_item_attributes( + RuntimeOrigin::signed(user_1), + collection_id, + item_id, + user_2, + )); + assert_eq!(item_attributes_approvals(collection_id, item_id), vec![user_2]); + + assert_ok!(Nfts::approve_item_attributes( + RuntimeOrigin::signed(user_1), + collection_id, + item_id, + user_3, + )); + assert_ok!(Nfts::approve_item_attributes( + RuntimeOrigin::signed(user_1), + collection_id, + item_id, + user_2, + )); + assert_eq!(item_attributes_approvals(collection_id, item_id), vec![user_2, user_3]); + + assert_noop!( + Nfts::approve_item_attributes( + RuntimeOrigin::signed(user_1), + collection_id, + item_id, + user_4, + ), + Error::::ReachedApprovalLimit + ); + + assert_ok!(Nfts::cancel_item_attributes_approval( + RuntimeOrigin::signed(user_1), + collection_id, + 
item_id, + user_2, + CancelAttributesApprovalWitness { account_attributes: 1 }, + )); + assert_eq!(item_attributes_approvals(collection_id, item_id), vec![user_3]); + }) +} diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index d57f62be97f39..c12ae39877d46 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -24,6 +24,7 @@ use enumflags2::{bitflags, BitFlags}; use frame_support::{ pallet_prelude::{BoundedVec, MaxEncodedLen}, traits::Get, + BoundedBTreeMap, BoundedBTreeSet, }; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; @@ -36,8 +37,12 @@ pub(super) type ApprovalsOf = BoundedBTreeMap< Option<::BlockNumber>, >::ApprovalsLimit, >; +pub(super) type ItemAttributesApprovals = + BoundedBTreeSet<::AccountId, >::ItemAttributesApprovalsLimit>; pub(super) type ItemDepositOf = ItemDeposit, ::AccountId>; +pub(super) type AttributeDepositOf = + AttributeDeposit, ::AccountId>; pub(super) type ItemDetailsFor = ItemDetails<::AccountId, ItemDepositOf, ApprovalsOf>; pub(super) type BalanceOf = @@ -65,9 +70,9 @@ impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); pub struct CollectionDetails { /// Collection's owner. pub(super) owner: AccountId, - /// The total balance deposited for the all storage associated with this collection. - /// Used by `destroy`. - pub(super) total_deposit: DepositBalance, + /// The total balance deposited by the owner for the all storage data associated with this + /// collection. Used by `destroy`. + pub(super) owner_deposit: DepositBalance, /// The total number of outstanding items of this collection. pub(super) items: u32, /// The total number of outstanding item metadata of this collection. @@ -100,6 +105,13 @@ impl CollectionDetails { } } +/// Witness data for items mint transactions. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub struct MintWitness { + /// Provide the id of the item in a required collection. 
+ pub owner_of_item: ItemId, +} + /// Information concerning the ownership of a single unique item. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] pub struct ItemDetails { @@ -173,6 +185,15 @@ pub struct PendingSwap { pub(super) deadline: Deadline, } +/// Information about the reserved attribute deposit. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct AttributeDeposit { + /// A depositor account. + pub(super) account: Option, + /// An amount that gets reserved. + pub(super) amount: DepositBalance, +} + #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub enum PriceDirection { Send, @@ -265,9 +286,9 @@ impl Default for MintSettings { - /// Provide the id of the item in a required collection. - pub owner_of_item: ItemId, +pub struct CancelAttributesApprovalWitness { + /// An amount of attributes previously created by account. + pub account_attributes: u32, } #[derive( diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index f254726ca19f2..a7eb3773f2ae8 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -63,7 +63,10 @@ pub trait WeightInfo { fn force_collection_config() -> Weight; fn lock_item_properties() -> Weight; fn set_attribute() -> Weight; + fn force_set_attribute() -> Weight; fn clear_attribute() -> Weight; + fn approve_item_attributes() -> Weight; + fn cancel_item_attributes_approval() -> Weight; fn set_metadata() -> Weight; fn clear_metadata() -> Weight; fn set_collection_metadata() -> Weight; @@ -258,12 +261,39 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) + fn force_set_attribute() -> Weight { + Weight::from_ref_time(53_019_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + } + // Storage: Nfts 
Class (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:1) fn clear_attribute() -> Weight { Weight::from_ref_time(52_530_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:1) + fn approve_item_attributes() -> Weight { + Weight::from_ref_time(52_530_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + } + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:1) + fn cancel_item_attributes_approval() -> Weight { + Weight::from_ref_time(52_530_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + } + // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts InstanceMetadataOf (r:1 w:1) @@ -566,12 +596,39 @@ impl WeightInfo for () { // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) + fn force_set_attribute() -> Weight { + Weight::from_ref_time(53_019_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + } + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:1) fn clear_attribute() -> Weight { Weight::from_ref_time(52_530_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts 
CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:1) + fn approve_item_attributes() -> Weight { + Weight::from_ref_time(52_530_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + } + // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:1) + fn cancel_item_attributes_approval() -> Weight { + Weight::from_ref_time(52_530_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + } + // Storage: Nfts Class (r:1 w:1) // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts InstanceMetadataOf (r:1 w:1) diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 84e416e50544d..a5d7c36de2c6c 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -115,7 +115,7 @@ pub use sp_runtime::{ self, print, traits::Printable, ConsensusEngineId, MAX_MODULE_ERROR_ENCODED_SIZE, }; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime::TypeId; @@ -127,7 +127,7 @@ pub const LOG_TARGET: &str = "runtime::frame-support"; pub enum Never {} /// A pallet identifier. These are per pallet and should be stored in a registry somewhere. 
-#[derive(Clone, Copy, Eq, PartialEq, Encode, Decode, TypeInfo)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct PalletId(pub [u8; 8]); impl TypeId for PalletId { diff --git a/frame/support/src/traits/tokens.rs b/frame/support/src/traits/tokens.rs index b3b3b4b7d90b1..03a24bd3ba9c8 100644 --- a/frame/support/src/traits/tokens.rs +++ b/frame/support/src/traits/tokens.rs @@ -28,6 +28,6 @@ pub mod nonfungibles; pub mod nonfungibles_v2; pub use imbalance::Imbalance; pub use misc::{ - AssetId, Balance, BalanceConversion, BalanceStatus, DepositConsequence, ExistenceRequirement, - Locker, WithdrawConsequence, WithdrawReasons, + AssetId, AttributeNamespace, Balance, BalanceConversion, BalanceStatus, DepositConsequence, + ExistenceRequirement, Locker, WithdrawConsequence, WithdrawReasons, }; diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs index 294d0e89c8b9e..f0b172841aa84 100644 --- a/frame/support/src/traits/tokens/misc.rs +++ b/frame/support/src/traits/tokens/misc.rs @@ -17,6 +17,7 @@ //! Miscellaneous types. +use crate::PalletId; use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; use sp_core::RuntimeDebug; @@ -126,6 +127,21 @@ pub enum BalanceStatus { Reserved, } +/// Attribute namespaces for non-fungible tokens. +#[derive( + Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, scale_info::TypeInfo, MaxEncodedLen, +)] +pub enum AttributeNamespace { + /// An attribute was set by the pallet. + Pallet(PalletId), + /// An attribute was set by collection's owner. + CollectionOwner, + /// An attribute was set by item's owner. + ItemOwner, + /// An attribute was set by pre-approved account. + Account(AccountId), +} + bitflags::bitflags! { /// Reasons for moving funds out of an account. 
#[derive(Encode, Decode, MaxEncodedLen)] diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs index 4f610d9b80a05..cd091791821ed 100644 --- a/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -25,7 +25,10 @@ //! use. use super::nonfungibles_v2 as nonfungibles; -use crate::{dispatch::DispatchResult, traits::Get}; +use crate::{ + dispatch::DispatchResult, + traits::{tokens::misc::AttributeNamespace, Get}, +}; use codec::{Decode, Encode}; use sp_runtime::TokenError; use sp_std::prelude::*; @@ -42,15 +45,23 @@ pub trait Inspect { /// Returns the attribute value of `item` corresponding to `key`. /// /// By default this is `None`; no attributes are defined. - fn attribute(_item: &Self::ItemId, _key: &[u8]) -> Option> { + fn attribute( + _item: &Self::ItemId, + _namespace: &AttributeNamespace, + _key: &[u8], + ) -> Option> { None } /// Returns the strongly-typed attribute value of `item` corresponding to `key`. /// /// By default this just attempts to use `attribute`. 
- fn typed_attribute(item: &Self::ItemId, key: &K) -> Option { - key.using_encoded(|d| Self::attribute(item, d)) + fn typed_attribute( + item: &Self::ItemId, + namespace: &AttributeNamespace, + key: &K, + ) -> Option { + key.using_encoded(|d| Self::attribute(item, namespace, d)) .and_then(|v| V::decode(&mut &v[..]).ok()) } @@ -137,11 +148,19 @@ impl< fn owner(item: &Self::ItemId) -> Option { >::owner(&A::get(), item) } - fn attribute(item: &Self::ItemId, key: &[u8]) -> Option> { - >::attribute(&A::get(), item, key) + fn attribute( + item: &Self::ItemId, + namespace: &AttributeNamespace, + key: &[u8], + ) -> Option> { + >::attribute(&A::get(), item, namespace, key) } - fn typed_attribute(item: &Self::ItemId, key: &K) -> Option { - >::typed_attribute(&A::get(), item, key) + fn typed_attribute( + item: &Self::ItemId, + namespace: &AttributeNamespace, + key: &K, + ) -> Option { + >::typed_attribute(&A::get(), item, namespace, key) } fn can_transfer(item: &Self::ItemId) -> bool { >::can_transfer(&A::get(), item) diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs index 0aec193f68fcb..5b93ca832d4f1 100644 --- a/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -27,7 +27,10 @@ //! Implementations of these traits may be converted to implementations of corresponding //! `nonfungible` traits by using the `nonfungible::ItemOf` type adapter. 
-use crate::dispatch::{DispatchError, DispatchResult}; +use crate::{ + dispatch::{DispatchError, DispatchResult}, + traits::tokens::misc::AttributeNamespace, +}; use codec::{Decode, Encode}; use sp_runtime::TokenError; use sp_std::prelude::*; @@ -58,6 +61,7 @@ pub trait Inspect { fn attribute( _collection: &Self::CollectionId, _item: &Self::ItemId, + _namespace: &AttributeNamespace, _key: &[u8], ) -> Option> { None @@ -70,9 +74,10 @@ pub trait Inspect { fn typed_attribute( collection: &Self::CollectionId, item: &Self::ItemId, + namespace: &AttributeNamespace, key: &K, ) -> Option { - key.using_encoded(|d| Self::attribute(collection, item, d)) + key.using_encoded(|d| Self::attribute(collection, item, namespace, d)) .and_then(|v| V::decode(&mut &v[..]).ok()) } diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr index a5ec31a9bb4e7..bb49e11679028 100644 --- a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 78 others + and 80 others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>` to implement `StorageInfoTrait` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 42ef5a34e4c30..999d8585c221a 100644 --- 
a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 161 others + and 162 others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 461d63ebb0d9c..e2870ffb9e86f 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> 
- and 278 others + and 279 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 161 others + and 162 others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index cce9fa70b3da5..d5b0c3b50a5ac 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 78 others + and 80 others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageInfoTrait` 
diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 877485dda2084..6b174d13c5778 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 78 others + and 80 others = note: required for `Key` to implement `KeyGeneratorMaxEncodedLen` = note: required for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` to implement `StorageInfoTrait` From b4ff566c45755c89ae5a61849d87124a2980afe5 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Wed, 23 Nov 2022 11:32:40 +0200 Subject: [PATCH 026/101] [Uniques V2] Final improvements (#12736) * Use KeyPrefixIterator instead of Box * Change create_collection() * Restrict from claiming NFTs twice * Update Readme * Remove dead code * Refactoring * Update readme * Fix clippy --- bin/node/runtime/src/lib.rs | 2 + frame/nfts/README.md | 112 +++++++++++------- frame/nfts/src/features/attributes.rs | 14 +++ frame/nfts/src/impl_nonfungibles.rs | 37 ++++-- frame/nfts/src/lib.rs | 49 ++++++-- frame/nfts/src/mock.rs | 3 + frame/nfts/src/tests.rs | 6 + frame/nfts/src/types.rs | 6 + .../src/traits/tokens/nonfungible_v2.rs | 17 ++- .../src/traits/tokens/nonfungibles_v2.rs | 20 +++- 10 files changed, 190 insertions(+), 76 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 5b6cb2ec574d3..bf2e7cb9b8c7c 100644 --- 
a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1519,6 +1519,7 @@ impl pallet_uniques::Config for Runtime { parameter_types! { pub Features: PalletFeatures = PalletFeatures::all_enabled(); + pub const NftsPalletId: PalletId = PalletId(*b"py/nfts_"); } impl pallet_nfts::Config for Runtime { @@ -1541,6 +1542,7 @@ impl pallet_nfts::Config for Runtime { type MaxDeadlineDuration = MaxDeadlineDuration; type Features = Features; type WeightInfo = pallet_nfts::weights::SubstrateWeight; + type PalletId = NftsPalletId; #[cfg(feature = "runtime-benchmarks")] type Helper = (); type CreateOrigin = AsEnsureOriginWithArg>; diff --git a/frame/nfts/README.md b/frame/nfts/README.md index 8a91a558b5b5f..7de4b9440e7f5 100644 --- a/frame/nfts/README.md +++ b/frame/nfts/README.md @@ -1,72 +1,100 @@ -# Uniques Module +# NFTs pallet -A simple, secure module for dealing with non-fungible assets. +A pallet for dealing with non-fungible assets. ## Overview -The Uniques module provides functionality for asset management of non-fungible asset classes, including: +The NFTs pallet provides functionality for non-fungible tokens' management, including: -* Asset Issuance -* Asset Transfer -* Asset Destruction +* Collection Creation +* NFT Minting +* NFT Transfers and Atomic Swaps +* NFT Trading methods +* Attributes Management +* NFT Burning -To use it in your runtime, you need to implement the assets [`uniques::Config`](https://paritytech.github.io/substrate/master/pallet_uniques/pallet/trait.Config.html). +To use it in your runtime, you need to implement [`nfts::Config`](https://paritytech.github.io/substrate/master/pallet_nfts/pallet/trait.Config.html). -The supported dispatchable functions are documented in the [`uniques::Call`](https://paritytech.github.io/substrate/master/pallet_uniques/pallet/enum.Call.html) enum. 
+The supported dispatchable functions are documented in the [`nfts::Call`](https://paritytech.github.io/substrate/master/pallet_nfts/pallet/enum.Call.html) enum. ### Terminology -* **Asset issuance:** The creation of a new asset instance. -* **Asset transfer:** The action of transferring an asset instance from one account to another. -* **Asset burning:** The destruction of an asset instance. -* **Non-fungible asset:** An asset for which each unit has unique characteristics. There is exactly - one instance of such an asset in existence and there is exactly one owning account. +* **Collection creation:** The creation of a new collection. +* **NFT minting:** The action of creating a new item within a collection. +* **NFT transfer:** The action of sending an item from one account to another. +* **Atomic swap:** The action of exchanging items between accounts without needing a 3rd party service. +* **NFT burning:** The destruction of an item. +* **Non-fungible token (NFT):** An item for which each unit has unique characteristics. There is exactly + one instance of such an item in existence and there is exactly one owning account (though that owning account could be a proxy account or multi-sig account). +* **Soul Bound NFT:** An item that is non-transferable from the account which it is minted into. ### Goals -The Uniques pallet in Substrate is designed to make the following possible: +The NFTs pallet in Substrate is designed to make the following possible: -* Allow accounts to permissionlessly create asset classes (collections of asset instances). -* Allow a named (permissioned) account to mint and burn unique assets within a class. -* Move asset instances between accounts permissionlessly. -* Allow a named (permissioned) account to freeze and unfreeze unique assets within a - class or the entire class. -* Allow the owner of an asset instance to delegate the ability to transfer the asset to some +* Allow accounts to permissionlessly create nft collections. 
+* Allow a named (permissioned) account to mint and burn unique items within a collection. +* Move items between accounts permissionlessly. +* Allow a named (permissioned) account to freeze and unfreeze items within a + collection or the entire collection. +* Allow the owner of an item to delegate the ability to transfer the item to some named third-party. +* Allow third-parties to store information in an NFT _without_ owning it (Eg. save game state). ## Interface ### Permissionless dispatchables -* `create`: Create a new asset class by placing a deposit. -* `transfer`: Transfer an asset instance to a new owner. -* `redeposit`: Update the deposit amount of an asset instance, potentially freeing funds. -* `approve_transfer`: Name a delegate who may authorise a transfer. + +* `create`: Create a new collection by placing a deposit. +* `mint`: Mint a new item within a collection (when the minting is public). +* `transfer`: Send an item to a new owner. +* `redeposit`: Update the deposit amount of an item, potentially freeing funds. +* `approve_transfer`: Name a delegate who may authorize a transfer. * `cancel_approval`: Revert the effects of a previous `approve_transfer`. +* `approve_item_attributes`: Name a delegate who may change item's attributes within a namespace. +* `cancel_item_attributes_approval`: Revert the effects of a previous `approve_item_attributes`. +* `set_price`: Set the price for an item. +* `buy_item`: Buy an item. +* `pay_tips`: Pay tips, could be used for paying the creator royalties. +* `create_swap`: Create an offer to swap an NFT for another NFT and optionally some fungibles. +* `cancel_swap`: Cancel previously created swap offer. +* `claim_swap`: Swap items in an atomic way. + ### Permissioned dispatchables -* `destroy`: Destroy an asset class. -* `mint`: Mint a new asset instance within an asset class. -* `burn`: Burn an asset instance within an asset class. -* `freeze`: Prevent an individual asset from being transferred. 
-* `thaw`: Revert the effects of a previous `freeze`. -* `freeze_class`: Prevent all asset within a class from being transferred. -* `thaw_class`: Revert the effects of a previous `freeze_class`. -* `transfer_ownership`: Alter the owner of an asset class, moving all associated deposits. -* `set_team`: Alter the permissioned accounts of an asset class. + +* `destroy`: Destroy a collection. This destroys all the items inside the collection and refunds the deposit. +* `force_mint`: Mint a new item within a collection. +* `burn`: Destroy an item within a collection. +* `lock_item_transfer`: Prevent an individual item from being transferred. +* `unlock_item_transfer`: Revert the effects of a previous `lock_item_transfer`. +* `clear_all_transfer_approvals`: Clears all transfer approvals set by calling the `approve_transfer`. +* `lock_collection`: Prevent all items within a collection from being transferred (making them all `soul bound`). +* `lock_item_properties`: Lock item's metadata or attributes. +* `transfer_ownership`: Alter the owner of a collection, moving all associated deposits. (Ownership of individual items will not be affected.) +* `set_team`: Alter the permissioned accounts of a collection. +* `set_collection_max_supply`: Change the max supply of a collection. +* `update_mint_settings`: Update the minting settings for collection. + ### Metadata (permissioned) dispatchables -* `set_attribute`: Set a metadata attribute of an asset instance or class. -* `clear_attribute`: Remove a metadata attribute of an asset instance or class. -* `set_metadata`: Set general metadata of an asset instance. -* `clear_metadata`: Remove general metadata of an asset instance. -* `set_class_metadata`: Set general metadata of an asset class. -* `clear_class_metadata`: Remove general metadata of an asset class. + +* `set_attribute`: Set a metadata attribute of an item or collection. +* `clear_attribute`: Remove a metadata attribute of an item or collection. 
+* `set_metadata`: Set general metadata of an item (E.g. an IPFS address of an image url). +* `clear_metadata`: Remove general metadata of an item. +* `set_collection_metadata`: Set general metadata of a collection. +* `clear_collection_metadata`: Remove general metadata of a collection. + ### Force (i.e. governance) dispatchables -* `force_create`: Create a new asset class. -* `force_asset_status`: Alter the underlying characteristics of an asset class. -Please refer to the [`Call`](https://paritytech.github.io/substrate/master/pallet_uniques/pallet/enum.Call.html) enum +* `force_create`: Create a new collection (the collection id can not be chosen). +* `force_collection_owner`: Change collection's owner. +* `force_collection_config`: Change collection's config. +* `force_set_attribute`: Set an attribute. + +Please refer to the [`Call`](https://paritytech.github.io/substrate/master/pallet_nfts/pallet/enum.Call.html) enum and its associated variants for documentation on each function. ## Related Modules diff --git a/frame/nfts/src/features/attributes.rs b/frame/nfts/src/features/attributes.rs index 0d65a1169323b..48e9c31d2a9bb 100644 --- a/frame/nfts/src/features/attributes.rs +++ b/frame/nfts/src/features/attributes.rs @@ -304,4 +304,18 @@ impl, I: 'static> Pallet { }; Ok(result) } + + /// A helper method to construct attribute's key. + pub fn construct_attribute_key( + key: Vec, + ) -> Result, DispatchError> { + Ok(BoundedVec::try_from(key).map_err(|_| Error::::IncorrectData)?) + } + + /// A helper method to construct attribute's value. + pub fn construct_attribute_value( + value: Vec, + ) -> Result, DispatchError> { + Ok(BoundedVec::try_from(value).map_err(|_| Error::::IncorrectData)?) 
+ } } diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index a9e05a6f41ce9..574d256a7705b 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -20,6 +20,7 @@ use super::*; use frame_support::{ ensure, + storage::KeyPrefixIterator, traits::{tokens::nonfungibles_v2::*, Get}, BoundedSlice, }; @@ -104,24 +105,28 @@ impl, I: 'static> Create<::AccountId, Collection { /// Create a `collection` of nonfungible items to be owned by `who` and managed by `admin`. fn create_collection( - collection: &Self::CollectionId, who: &T::AccountId, admin: &T::AccountId, config: &CollectionConfigFor, - ) -> DispatchResult { + ) -> Result { // DepositRequired can be disabled by calling the force_create() only ensure!( !config.has_disabled_setting(CollectionSetting::DepositRequired), Error::::WrongSetting ); + + let collection = + NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()); + Self::do_create_collection( - *collection, + collection, who.clone(), admin.clone(), *config, T::CollectionDeposit::get(), - Event::Created { collection: *collection, creator: who.clone(), owner: admin.clone() }, - ) + Event::Created { collection, creator: who.clone(), owner: admin.clone() }, + )?; + Ok(collection) } } @@ -186,25 +191,31 @@ impl, I: 'static> Transfer for Pallet { } impl, I: 'static> InspectEnumerable for Pallet { + type CollectionsIterator = KeyPrefixIterator<>::CollectionId>; + type ItemsIterator = KeyPrefixIterator<>::ItemId>; + type OwnedIterator = + KeyPrefixIterator<(>::CollectionId, >::ItemId)>; + type OwnedInCollectionIterator = KeyPrefixIterator<>::ItemId>; + /// Returns an iterator of the collections in existence. /// /// NOTE: iterating this list invokes a storage read per item. 
- fn collections() -> Box> { - Box::new(CollectionMetadataOf::::iter_keys()) + fn collections() -> Self::CollectionsIterator { + Collection::::iter_keys() } /// Returns an iterator of the items of a `collection` in existence. /// /// NOTE: iterating this list invokes a storage read per item. - fn items(collection: &Self::CollectionId) -> Box> { - Box::new(ItemMetadataOf::::iter_key_prefix(collection)) + fn items(collection: &Self::CollectionId) -> Self::ItemsIterator { + Item::::iter_key_prefix(collection) } /// Returns an iterator of the items of all collections owned by `who`. /// /// NOTE: iterating this list invokes a storage read per item. - fn owned(who: &T::AccountId) -> Box> { - Box::new(Account::::iter_key_prefix((who,))) + fn owned(who: &T::AccountId) -> Self::OwnedIterator { + Account::::iter_key_prefix((who,)) } /// Returns an iterator of the items of `collection` owned by `who`. @@ -213,7 +224,7 @@ impl, I: 'static> InspectEnumerable for Pallet fn owned_in_collection( collection: &Self::CollectionId, who: &T::AccountId, - ) -> Box> { - Box::new(Account::::iter_key_prefix((who, collection))) + ) -> Self::OwnedInCollectionIterator { + Account::::iter_key_prefix((who, collection)) } } diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 8de9f3103e7c2..f4d157d1d1cda 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -65,7 +65,7 @@ type AccountIdLookupOf = <::Lookup as StaticLookup>::Sourc #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; + use frame_support::{pallet_prelude::*, traits::ExistenceRequirement, PalletId}; use frame_system::pallet_prelude::*; #[pallet::pallet] @@ -171,6 +171,10 @@ pub mod pallet { #[pallet::constant] type Features: Get; + /// The pallet's id. + #[pallet::constant] + type PalletId: Get; + #[cfg(feature = "runtime-benchmarks")] /// A set of helper functions for benchmarking. 
type Helper: BenchmarkHelper; @@ -583,6 +587,10 @@ pub mod pallet { MintNotStated, /// Mint has already ended. MintEnded, + /// The provided Item was already used for claiming. + AlreadyClaimed, + /// The provided data is incorrect. + IncorrectData, } #[pallet::call] @@ -756,16 +764,35 @@ pub mod pallet { ) }, MintType::HolderOf(collection_id) => { - let correct_witness = match witness_data { - Some(MintWitness { owner_of_item }) => - Account::::contains_key(( - &caller, - &collection_id, - &owner_of_item, - )), - None => false, - }; - ensure!(correct_witness, Error::::BadWitness) + let MintWitness { owner_of_item } = + witness_data.ok_or(Error::::BadWitness)?; + + let has_item = Account::::contains_key(( + &caller, + &collection_id, + &owner_of_item, + )); + ensure!(has_item, Error::::BadWitness); + + let attribute_key = Self::construct_attribute_key( + PalletAttributes::::UsedToClaim(collection) + .encode(), + )?; + + let key = ( + &collection_id, + Some(owner_of_item), + AttributeNamespace::Pallet(T::PalletId::get()), + &attribute_key, + ); + let already_claimed = Attribute::::contains_key(key.clone()); + ensure!(!already_claimed, Error::::AlreadyClaimed); + + let value = Self::construct_attribute_value(vec![0])?; + Attribute::::insert( + key, + (value, AttributeDeposit { account: None, amount: Zero::zero() }), + ); }, _ => {}, } diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs index f814b209d5f78..78aebb9471481 100644 --- a/frame/nfts/src/mock.rs +++ b/frame/nfts/src/mock.rs @@ -23,6 +23,7 @@ use crate as pallet_nfts; use frame_support::{ construct_runtime, parameter_types, traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, + PalletId, }; use sp_core::H256; use sp_runtime::{ @@ -86,6 +87,7 @@ impl pallet_balances::Config for Test { parameter_types! 
{ pub storage Features: PalletFeatures = PalletFeatures::all_enabled(); + pub const NftsPalletId: PalletId = PalletId(*b"py/nfts_"); } impl Config for Test { @@ -110,6 +112,7 @@ impl Config for Test { type MaxDeadlineDuration = ConstU64<10000>; type Features = Features; type WeightInfo = (); + type PalletId = NftsPalletId; #[cfg(feature = "runtime-benchmarks")] type Helper = (); } diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 1e057a8b58d6d..7cbd7ff6c36f7 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -276,6 +276,12 @@ fn mint_should_work() { 42, Some(MintWitness { owner_of_item: 43 }) )); + + // can't mint twice + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(2), 1, 46, Some(MintWitness { owner_of_item: 43 })), + Error::::AlreadyClaimed + ); }); } diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index c12ae39877d46..5f13fb72eb33f 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -291,6 +291,12 @@ pub struct CancelAttributesApprovalWitness { pub account_attributes: u32, } +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub enum PalletAttributes { + /// Marks an item as being used in order to claim another item. + UsedToClaim(CollectionId), +} + #[derive( Clone, Copy, Decode, Default, Encode, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo, )] diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs index cd091791821ed..ab0e72b3c8286 100644 --- a/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -76,11 +76,16 @@ pub trait Inspect { /// Interface for enumerating items in existence or owned by a given account over a collection /// of NFTs. pub trait InspectEnumerable: Inspect { + /// The iterator type for [`Self::items`]. + type ItemsIterator: Iterator; + /// The iterator type for [`Self::owned`]. 
+ type OwnedIterator: Iterator; + /// Returns an iterator of the items within a `collection` in existence. - fn items() -> Box>; + fn items() -> Self::ItemsIterator; /// Returns an iterator of the items of all collections owned by `who`. - fn owned(who: &AccountId) -> Box>; + fn owned(who: &AccountId) -> Self::OwnedIterator; } /// Trait for providing an interface for NFT-like items which may be minted, burned and/or have @@ -173,10 +178,14 @@ impl< AccountId, > InspectEnumerable for ItemOf { - fn items() -> Box> { + type ItemsIterator = >::ItemsIterator; + type OwnedIterator = + >::OwnedInCollectionIterator; + + fn items() -> Self::ItemsIterator { >::items(&A::get()) } - fn owned(who: &AccountId) -> Box> { + fn owned(who: &AccountId) -> Self::OwnedIterator { >::owned_in_collection(&A::get(), who) } } diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs index 5b93ca832d4f1..09b4793832d7e 100644 --- a/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -110,31 +110,39 @@ pub trait Inspect { /// Interface for enumerating items in existence or owned by a given account over many collections /// of NFTs. pub trait InspectEnumerable: Inspect { + /// The iterator type for [`Self::collections`]. + type CollectionsIterator: Iterator; + /// The iterator type for [`Self::items`]. + type ItemsIterator: Iterator; + /// The iterator type for [`Self::owned`]. + type OwnedIterator: Iterator; + /// The iterator type for [`Self::owned_in_collection`]. + type OwnedInCollectionIterator: Iterator; + /// Returns an iterator of the collections in existence. - fn collections() -> Box>; + fn collections() -> Self::CollectionsIterator; /// Returns an iterator of the items of a `collection` in existence. 
- fn items(collection: &Self::CollectionId) -> Box>; + fn items(collection: &Self::CollectionId) -> Self::ItemsIterator; /// Returns an iterator of the items of all collections owned by `who`. - fn owned(who: &AccountId) -> Box>; + fn owned(who: &AccountId) -> Self::OwnedIterator; /// Returns an iterator of the items of `collection` owned by `who`. fn owned_in_collection( collection: &Self::CollectionId, who: &AccountId, - ) -> Box>; + ) -> Self::OwnedInCollectionIterator; } /// Trait for providing the ability to create collections of nonfungible items. pub trait Create: Inspect { /// Create a `collection` of nonfungible items to be owned by `who` and managed by `admin`. fn create_collection( - collection: &Self::CollectionId, who: &AccountId, admin: &AccountId, config: &CollectionConfig, - ) -> DispatchResult; + ) -> Result; } /// Trait for providing the ability to destroy collections of nonfungible items. From 4c1b7f763811a9392eaa7d21d8730e08f7b259ed Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Wed, 23 Nov 2022 16:28:20 +0200 Subject: [PATCH 027/101] Update frame/nfts/src/lib.rs Co-authored-by: Squirrel --- frame/nfts/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index f4d157d1d1cda..ea379c4a5aa0b 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -310,7 +310,7 @@ pub mod pallet { >; /// Stores the `CollectionId` that is going to be used for the next collection. - /// This gets incremented by 1 whenever a new collection is created. + /// This gets incremented whenever a new collection is created. 
#[pallet::storage] pub(super) type NextCollectionId, I: 'static = ()> = StorageValue<_, T::CollectionId, OptionQuery>; From beb8caa3c35760dd972ddb0400bf10914959c32a Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Fri, 25 Nov 2022 15:19:02 +0000 Subject: [PATCH 028/101] ".git/.scripts/bench-bot.sh" pallet dev pallet_nfts --- frame/nfts/src/weights.rs | 936 +++++++++++++++++++++----------------- 1 file changed, 508 insertions(+), 428 deletions(-) diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index a7eb3773f2ae8..67f65b10e98e3 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_nfts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-10-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-25, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -32,8 +32,10 @@ // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 +// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json // --pallet=pallet_nfts // --chain=dev +// --header=./HEADER-APACHE2 // --output=./frame/nfts/src/weights.rs // --template=./.maintain/frame-weight-template.hbs @@ -48,7 +50,7 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn create() -> Weight; fn force_create() -> Weight; - fn destroy(n: u32, m: u32, a: u32, ) -> Weight; + fn destroy(n: u32, ) -> Weight; fn mint() -> Weight; fn force_mint() -> Weight; fn burn() -> Weight; @@ -66,7 +68,7 @@ pub trait WeightInfo { fn force_set_attribute() -> Weight; fn clear_attribute() -> Weight; fn approve_item_attributes() -> Weight; - fn cancel_item_attributes_approval() -> Weight; + fn cancel_item_attributes_approval(n: u32, ) -> Weight; fn set_metadata() -> Weight; fn clear_metadata() -> Weight; fn set_collection_metadata() -> Weight; @@ -89,669 +91,747 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Nfts NextCollectionId (r:1 w:1) - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:0 w:1) // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts CollectionAccount (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(38_062_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + // Minimum execution time: 42_075 nanoseconds. 
+ Weight::from_ref_time(42_614_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(5)) } // Storage: Nfts NextCollectionId (r:1 w:1) - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:0 w:1) // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts CollectionAccount (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(25_917_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts Asset (r:1 w:0) - // Storage: Nfts ClassAccount (r:0 w:1) - // Storage: Nfts ClassMetadataOf (r:0 w:1) + // Minimum execution time: 29_799 nanoseconds. + Weight::from_ref_time(30_511_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(5)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts Item (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:0) + // Storage: Nfts CollectionRoleOf (r:0 w:1) + // Storage: Nfts CollectionMetadataOf (r:0 w:1) // Storage: Nfts CollectionConfigOf (r:0 w:1) - // Storage: Nfts CollectionMaxSupply (r:0 w:1) - // Storage: Nfts Attribute (r:0 w:20) - // Storage: Nfts InstanceMetadataOf (r:0 w:20) + // Storage: Nfts CollectionAccount (r:0 w:1) + // Storage: Nfts ItemMetadataOf (r:0 w:20) // Storage: Nfts ItemConfigOf (r:0 w:20) // Storage: Nfts Account (r:0 w:20) /// The range of component `n` is `[0, 1000]`. - /// The range of component `m` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. 
- fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - Weight::from_ref_time(55_419_000 as u64) - // Standard Error: 18_623 - .saturating_add(Weight::from_ref_time(12_843_237 as u64).saturating_mul(n as u64)) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(315_839 as u64).saturating_mul(m as u64)) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(217_497 as u64).saturating_mul(a as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(5 as u64)) - .saturating_add(T::DbWeight::get().writes((5 as u64).saturating_mul(n as u64))) - } - // Storage: Nfts Asset (r:1 w:1) - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts CollectionMaxSupply (r:1 w:0) + fn destroy(n: u32, ) -> Weight { + // Minimum execution time: 65_846 nanoseconds. + Weight::from_ref_time(66_082_000) + // Standard Error: 27_878 + .saturating_add(Weight::from_ref_time(26_747_590).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(n.into()))) + } // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ItemConfigOf (r:0 w:1) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) // Storage: Nfts Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(47_947_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - } - // Storage: Nfts Asset (r:1 w:1) - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts CollectionMaxSupply (r:1 w:0) + // Minimum execution time: 58_577 nanoseconds. 
+ Weight::from_ref_time(59_058_000) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ItemConfigOf (r:0 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:1) // Storage: Nfts Account (r:0 w:1) fn force_mint() -> Weight { - Weight::from_ref_time(47_947_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts Asset (r:1 w:1) - // Storage: Nfts ItemConfigOf (r:0 w:1) + // Minimum execution time: 56_494 nanoseconds. + Weight::from_ref_time(57_565_000) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) // Storage: Nfts Account (r:0 w:1) // Storage: Nfts ItemPriceOf (r:0 w:1) + // Storage: Nfts ItemAttributesApprovalsOf (r:0 w:1) // Storage: Nfts PendingSwapOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(47_193_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + // Minimum execution time: 59_393 nanoseconds. 
+ Weight::from_ref_time(60_562_000) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(7)) } - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:0) - // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: System Account (r:1 w:1) // Storage: Nfts Account (r:0 w:2) // Storage: Nfts ItemPriceOf (r:0 w:1) // Storage: Nfts PendingSwapOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(42_305_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + // Minimum execution time: 65_852 nanoseconds. + Weight::from_ref_time(66_308_000) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) } - // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts Asset (r:102 w:102) + // Storage: Nfts Item (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - Weight::from_ref_time(26_327_000 as u64) - // Standard Error: 10_090 - .saturating_add(Weight::from_ref_time(10_876_864 as u64).saturating_mul(i as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) - } - // Storage: Nfts Class (r:1 w:0) + // Minimum execution time: 25_795 nanoseconds. 
+ Weight::from_ref_time(26_128_000) + // Standard Error: 10_295 + .saturating_add(Weight::from_ref_time(11_202_286).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + // Storage: Nfts CollectionRoleOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:1) fn lock_item_transfer() -> Weight { - Weight::from_ref_time(28_194_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 29_090 nanoseconds. + Weight::from_ref_time(29_772_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionRoleOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:1) fn unlock_item_transfer() -> Weight { - Weight::from_ref_time(28_821_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 28_947 nanoseconds. + Weight::from_ref_time(29_559_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionRoleOf (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:1) fn lock_collection() -> Weight { - Weight::from_ref_time(25_896_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 26_972 nanoseconds. 
+ Weight::from_ref_time(27_803_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts ClassAccount (r:0 w:2) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(32_728_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + // Minimum execution time: 32_165 nanoseconds. + Weight::from_ref_time(32_926_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) } - // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:0 w:4) fn set_team() -> Weight { - Weight::from_ref_time(24_805_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 35_375 nanoseconds. + Weight::from_ref_time(35_950_000) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(5)) } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts ClassAccount (r:0 w:1) - // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionAccount (r:0 w:2) fn force_collection_owner() -> Weight { - Weight::from_ref_time(28_468_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + // Minimum execution time: 27_470 nanoseconds. 
+ Weight::from_ref_time(27_855_000) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(3)) } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:0 w:1) fn force_collection_config() -> Weight { - Weight::from_ref_time(28_468_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + // Minimum execution time: 23_990 nanoseconds. + Weight::from_ref_time(24_347_000) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:1) fn lock_item_properties() -> Weight { - Weight::from_ref_time(27_377_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 28_481 nanoseconds. + Weight::from_ref_time(28_929_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(53_019_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + // Minimum execution time: 54_435 nanoseconds. 
+ Weight::from_ref_time(55_237_000) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts Attribute (r:1 w:1) fn force_set_attribute() -> Weight { - Weight::from_ref_time(53_019_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + // Minimum execution time: 35_254 nanoseconds. + Weight::from_ref_time(35_941_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:0) fn clear_attribute() -> Weight { - Weight::from_ref_time(52_530_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + // Minimum execution time: 51_483 nanoseconds. 
+ Weight::from_ref_time(52_915_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ItemConfigOf (r:1 w:0) - // Storage: Nfts Attribute (r:1 w:1) + // Storage: Nfts Item (r:1 w:0) + // Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) fn approve_item_attributes() -> Weight { - Weight::from_ref_time(52_530_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ItemConfigOf (r:1 w:0) - // Storage: Nfts Attribute (r:1 w:1) - fn cancel_item_attributes_approval() -> Weight { - Weight::from_ref_time(52_530_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - } - // Storage: Nfts Class (r:1 w:1) + // Minimum execution time: 27_929 nanoseconds. + Weight::from_ref_time(28_329_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:0) + // Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) + // Storage: Nfts Attribute (r:1 w:0) + // Storage: System Account (r:1 w:1) + /// The range of component `n` is `[0, 1000]`. + fn cancel_item_attributes_approval(n: u32, ) -> Weight { + // Minimum execution time: 37_217 nanoseconds. 
+ Weight::from_ref_time(37_692_000) + // Standard Error: 7_804 + .saturating_add(Weight::from_ref_time(7_344_173).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } + // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts InstanceMetadataOf (r:1 w:1) + // Storage: Nfts ItemMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - Weight::from_ref_time(48_054_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + // Minimum execution time: 47_675 nanoseconds. + Weight::from_ref_time(48_282_000) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) } - // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts ItemConfigOf (r:1 w:0) - // Storage: Nfts InstanceMetadataOf (r:1 w:1) + // Storage: Nfts ItemMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(46_590_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + // Minimum execution time: 46_062 nanoseconds. 
+ Weight::from_ref_time(46_854_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts ClassMetadataOf (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(44_281_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + // Minimum execution time: 43_847 nanoseconds. + Weight::from_ref_time(44_792_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ClassMetadataOf (r:1 w:1) + // Storage: Nfts CollectionMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(42_355_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 42_403 nanoseconds. + Weight::from_ref_time(42_811_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:0) - // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts CollectionRoleOf (r:1 w:0) fn approve_transfer() -> Weight { - Weight::from_ref_time(33_170_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 34_880 nanoseconds. 
+ Weight::from_ref_time(35_737_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:0) - // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) fn cancel_approval() -> Weight { - Weight::from_ref_time(31_121_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 31_606 nanoseconds. + Weight::from_ref_time(32_339_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:0) - // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) fn clear_all_transfer_approvals() -> Weight { - Weight::from_ref_time(30_133_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 30_626 nanoseconds. + Weight::from_ref_time(31_043_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(26_421_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 27_276 nanoseconds. 
+ Weight::from_ref_time(28_016_000) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Nfts CollectionMaxSupply (r:1 w:1) - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:1) + // Storage: Nfts Collection (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(26_358_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 28_366 nanoseconds. + Weight::from_ref_time(28_719_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Nfts CollectionMaxSupply (r:1 w:1) - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:1) fn update_mint_settings() -> Weight { - Weight::from_ref_time(26_358_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 27_292 nanoseconds. + Weight::from_ref_time(27_614_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Nfts Asset (r:1 w:0) + // Storage: Nfts Item (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(33_607_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 34_133 nanoseconds. 
+ Weight::from_ref_time(34_510_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) // Storage: Nfts ItemPriceOf (r:1 w:1) - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: System Account (r:1 w:1) // Storage: Nfts Account (r:0 w:2) // Storage: Nfts PendingSwapOf (r:0 w:1) fn buy_item() -> Weight { - Weight::from_ref_time(54_511_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + // Minimum execution time: 69_501 nanoseconds. + Weight::from_ref_time(70_342_000) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) } /// The range of component `n` is `[0, 10]`. fn pay_tips(n: u32, ) -> Weight { - Weight::from_ref_time(6_015_000 as u64) - // Standard Error: 34_307 - .saturating_add(Weight::from_ref_time(4_308_600 as u64).saturating_mul(n as u64)) + // Minimum execution time: 4_754 nanoseconds. + Weight::from_ref_time(11_356_736) + // Standard Error: 38_352 + .saturating_add(Weight::from_ref_time(3_427_961).saturating_mul(n.into())) } - // Storage: Nfts Asset (r:2 w:0) + // Storage: Nfts Item (r:2 w:0) // Storage: Nfts PendingSwapOf (r:0 w:1) fn create_swap() -> Weight { - Weight::from_ref_time(30_330_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 31_371 nanoseconds. 
+ Weight::from_ref_time(32_227_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Nfts PendingSwapOf (r:1 w:1) - // Storage: Nfts Asset (r:1 w:0) + // Storage: Nfts Item (r:1 w:0) fn cancel_swap() -> Weight { - Weight::from_ref_time(30_516_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 34_114 nanoseconds. + Weight::from_ref_time(34_779_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Nfts Asset (r:2 w:2) + // Storage: Nfts Item (r:2 w:2) // Storage: Nfts PendingSwapOf (r:1 w:2) - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:2 w:0) + // Storage: System Account (r:1 w:1) // Storage: Nfts Account (r:0 w:4) // Storage: Nfts ItemPriceOf (r:0 w:2) fn claim_swap() -> Weight { - Weight::from_ref_time(66_191_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(10 as u64)) + // Minimum execution time: 97_965 nanoseconds. + Weight::from_ref_time(98_699_000) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(11)) } } // For backwards compatibility and tests impl WeightInfo for () { // Storage: Nfts NextCollectionId (r:1 w:1) - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:0 w:1) // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts CollectionAccount (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(39_252_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + // Minimum execution time: 42_075 nanoseconds. 
+ Weight::from_ref_time(42_614_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(5)) } // Storage: Nfts NextCollectionId (r:1 w:1) - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:0 w:1) // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts CollectionAccount (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(27_479_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) - } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts Asset (r:1 w:0) - // Storage: Nfts ClassAccount (r:0 w:1) - // Storage: Nfts ClassMetadataOf (r:0 w:1) + // Minimum execution time: 29_799 nanoseconds. + Weight::from_ref_time(30_511_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(5)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts Item (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:0) + // Storage: Nfts CollectionRoleOf (r:0 w:1) + // Storage: Nfts CollectionMetadataOf (r:0 w:1) // Storage: Nfts CollectionConfigOf (r:0 w:1) - // Storage: Nfts CollectionMaxSupply (r:0 w:1) - // Storage: Nfts Attribute (r:0 w:20) - // Storage: Nfts InstanceMetadataOf (r:0 w:20) + // Storage: Nfts CollectionAccount (r:0 w:1) + // Storage: Nfts ItemMetadataOf (r:0 w:20) // Storage: Nfts ItemConfigOf (r:0 w:20) // Storage: Nfts Account (r:0 w:20) /// The range of component `n` is `[0, 1000]`. - /// The range of component `m` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. 
- fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - Weight::from_ref_time(55_419_000 as u64) - // Standard Error: 18_623 - .saturating_add(Weight::from_ref_time(12_843_237 as u64).saturating_mul(n as u64)) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(315_839 as u64).saturating_mul(m as u64)) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(217_497 as u64).saturating_mul(a as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) - .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(n as u64))) - } - // Storage: Nfts Asset (r:1 w:1) - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts CollectionMaxSupply (r:1 w:0) + fn destroy(n: u32, ) -> Weight { + // Minimum execution time: 65_846 nanoseconds. + Weight::from_ref_time(66_082_000) + // Standard Error: 27_878 + .saturating_add(Weight::from_ref_time(26_747_590).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes(5)) + .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(n.into()))) + } // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ItemConfigOf (r:0 w:1) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) // Storage: Nfts Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(47_947_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) - } - // Storage: Nfts Asset (r:1 w:1) - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts CollectionMaxSupply (r:1 w:0) + // Minimum execution time: 58_577 nanoseconds. 
+ Weight::from_ref_time(59_058_000) + .saturating_add(RocksDbWeight::get().reads(5)) + .saturating_add(RocksDbWeight::get().writes(4)) + } + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ItemConfigOf (r:0 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:1) // Storage: Nfts Account (r:0 w:1) fn force_mint() -> Weight { - Weight::from_ref_time(47_947_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) - } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts Asset (r:1 w:1) - // Storage: Nfts ItemConfigOf (r:0 w:1) + // Minimum execution time: 56_494 nanoseconds. + Weight::from_ref_time(57_565_000) + .saturating_add(RocksDbWeight::get().reads(5)) + .saturating_add(RocksDbWeight::get().writes(4)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) // Storage: Nfts Account (r:0 w:1) // Storage: Nfts ItemPriceOf (r:0 w:1) + // Storage: Nfts ItemAttributesApprovalsOf (r:0 w:1) // Storage: Nfts PendingSwapOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(47_193_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + // Minimum execution time: 59_393 nanoseconds. 
+ Weight::from_ref_time(60_562_000) + .saturating_add(RocksDbWeight::get().reads(4)) + .saturating_add(RocksDbWeight::get().writes(7)) } - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:0) - // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: System Account (r:1 w:1) // Storage: Nfts Account (r:0 w:2) // Storage: Nfts ItemPriceOf (r:0 w:1) // Storage: Nfts PendingSwapOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(42_305_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + // Minimum execution time: 65_852 nanoseconds. + Weight::from_ref_time(66_308_000) + .saturating_add(RocksDbWeight::get().reads(6)) + .saturating_add(RocksDbWeight::get().writes(6)) } - // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts Asset (r:102 w:102) + // Storage: Nfts Item (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - Weight::from_ref_time(26_327_000 as u64) - // Standard Error: 10_090 - .saturating_add(Weight::from_ref_time(10_876_864 as u64).saturating_mul(i as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) - } - // Storage: Nfts Class (r:1 w:0) + // Minimum execution time: 25_795 nanoseconds. 
+ Weight::from_ref_time(26_128_000) + // Standard Error: 10_295 + .saturating_add(Weight::from_ref_time(11_202_286).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + // Storage: Nfts CollectionRoleOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:1) fn lock_item_transfer() -> Weight { - Weight::from_ref_time(28_194_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 29_090 nanoseconds. + Weight::from_ref_time(29_772_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionRoleOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:1) fn unlock_item_transfer() -> Weight { - Weight::from_ref_time(28_821_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 28_947 nanoseconds. + Weight::from_ref_time(29_559_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionRoleOf (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:1) fn lock_collection() -> Weight { - Weight::from_ref_time(25_896_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 26_972 nanoseconds. 
+ Weight::from_ref_time(27_803_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts ClassAccount (r:0 w:2) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(32_728_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + // Minimum execution time: 32_165 nanoseconds. + Weight::from_ref_time(32_926_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(4)) } - // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:0 w:4) fn set_team() -> Weight { - Weight::from_ref_time(24_805_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 35_375 nanoseconds. + Weight::from_ref_time(35_950_000) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(5)) } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts ClassAccount (r:0 w:1) - // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionAccount (r:0 w:2) fn force_collection_owner() -> Weight { - Weight::from_ref_time(28_468_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + // Minimum execution time: 27_470 nanoseconds. 
+ Weight::from_ref_time(27_855_000) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(3)) } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts ClassAccount (r:0 w:1) + // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:0 w:1) fn force_collection_config() -> Weight { - Weight::from_ref_time(28_468_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + // Minimum execution time: 23_990 nanoseconds. + Weight::from_ref_time(24_347_000) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:1) fn lock_item_properties() -> Weight { - Weight::from_ref_time(27_377_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 28_481 nanoseconds. + Weight::from_ref_time(28_929_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(53_019_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + // Minimum execution time: 54_435 nanoseconds. 
+ Weight::from_ref_time(55_237_000) + .saturating_add(RocksDbWeight::get().reads(4)) + .saturating_add(RocksDbWeight::get().writes(2)) } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts Attribute (r:1 w:1) fn force_set_attribute() -> Weight { - Weight::from_ref_time(53_019_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + // Minimum execution time: 35_254 nanoseconds. + Weight::from_ref_time(35_941_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(2)) } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:0) fn clear_attribute() -> Weight { - Weight::from_ref_time(52_530_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + // Minimum execution time: 51_483 nanoseconds. 
+ Weight::from_ref_time(52_915_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(2)) } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ItemConfigOf (r:1 w:0) - // Storage: Nfts Attribute (r:1 w:1) + // Storage: Nfts Item (r:1 w:0) + // Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) fn approve_item_attributes() -> Weight { - Weight::from_ref_time(52_530_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - } - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ItemConfigOf (r:1 w:0) - // Storage: Nfts Attribute (r:1 w:1) - fn cancel_item_attributes_approval() -> Weight { - Weight::from_ref_time(52_530_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - } - // Storage: Nfts Class (r:1 w:1) + // Minimum execution time: 27_929 nanoseconds. + Weight::from_ref_time(28_329_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:0) + // Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) + // Storage: Nfts Attribute (r:1 w:0) + // Storage: System Account (r:1 w:1) + /// The range of component `n` is `[0, 1000]`. + fn cancel_item_attributes_approval(n: u32, ) -> Weight { + // Minimum execution time: 37_217 nanoseconds. 
+ Weight::from_ref_time(37_692_000) + // Standard Error: 7_804 + .saturating_add(Weight::from_ref_time(7_344_173).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(4)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes(2)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } + // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts InstanceMetadataOf (r:1 w:1) + // Storage: Nfts ItemMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - Weight::from_ref_time(48_054_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + // Minimum execution time: 47_675 nanoseconds. + Weight::from_ref_time(48_282_000) + .saturating_add(RocksDbWeight::get().reads(4)) + .saturating_add(RocksDbWeight::get().writes(2)) } - // Storage: Nfts Class (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts ItemConfigOf (r:1 w:0) - // Storage: Nfts InstanceMetadataOf (r:1 w:1) + // Storage: Nfts ItemMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(46_590_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + // Minimum execution time: 46_062 nanoseconds. 
+ Weight::from_ref_time(46_854_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(2)) } // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts Class (r:1 w:1) - // Storage: Nfts ClassMetadataOf (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(44_281_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + // Minimum execution time: 43_847 nanoseconds. + Weight::from_ref_time(44_792_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(2)) } - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) - // Storage: Nfts ClassMetadataOf (r:1 w:1) + // Storage: Nfts CollectionMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(42_355_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 42_403 nanoseconds. + Weight::from_ref_time(42_811_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:0) - // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts CollectionRoleOf (r:1 w:0) fn approve_transfer() -> Weight { - Weight::from_ref_time(33_170_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 34_880 nanoseconds. 
+ Weight::from_ref_time(35_737_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:0) - // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) fn cancel_approval() -> Weight { - Weight::from_ref_time(31_121_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 31_606 nanoseconds. + Weight::from_ref_time(32_339_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) } - // Storage: Nfts Class (r:1 w:0) - // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) fn clear_all_transfer_approvals() -> Weight { - Weight::from_ref_time(30_133_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 30_626 nanoseconds. + Weight::from_ref_time(31_043_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(26_421_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 27_276 nanoseconds. 
+ Weight::from_ref_time(28_016_000) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) } - // Storage: Nfts CollectionMaxSupply (r:1 w:1) - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:1) + // Storage: Nfts Collection (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(26_358_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 28_366 nanoseconds. + Weight::from_ref_time(28_719_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) } - // Storage: Nfts CollectionMaxSupply (r:1 w:1) - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:1) fn update_mint_settings() -> Weight { - Weight::from_ref_time(26_358_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 27_292 nanoseconds. + Weight::from_ref_time(27_614_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) } - // Storage: Nfts Asset (r:1 w:0) + // Storage: Nfts Item (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(33_607_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 34_133 nanoseconds. 
+ Weight::from_ref_time(34_510_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(1)) } - // Storage: Nfts Asset (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) // Storage: Nfts ItemPriceOf (r:1 w:1) - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: System Account (r:1 w:1) // Storage: Nfts Account (r:0 w:2) // Storage: Nfts PendingSwapOf (r:0 w:1) fn buy_item() -> Weight { - Weight::from_ref_time(54_511_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + // Minimum execution time: 69_501 nanoseconds. + Weight::from_ref_time(70_342_000) + .saturating_add(RocksDbWeight::get().reads(6)) + .saturating_add(RocksDbWeight::get().writes(6)) } /// The range of component `n` is `[0, 10]`. fn pay_tips(n: u32, ) -> Weight { - Weight::from_ref_time(5_477_000 as u64) - // Standard Error: 33_188 - .saturating_add(Weight::from_ref_time(4_285_339 as u64).saturating_mul(n as u64)) + // Minimum execution time: 4_754 nanoseconds. + Weight::from_ref_time(11_356_736) + // Standard Error: 38_352 + .saturating_add(Weight::from_ref_time(3_427_961).saturating_mul(n.into())) } - // Storage: Nfts Asset (r:2 w:0) + // Storage: Nfts Item (r:2 w:0) // Storage: Nfts PendingSwapOf (r:0 w:1) fn create_swap() -> Weight { - Weight::from_ref_time(30_330_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 31_371 nanoseconds. 
+ Weight::from_ref_time(32_227_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Nfts PendingSwapOf (r:1 w:1) - // Storage: Nfts Asset (r:1 w:0) + // Storage: Nfts Item (r:1 w:0) fn cancel_swap() -> Weight { - Weight::from_ref_time(30_516_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 34_114 nanoseconds. + Weight::from_ref_time(34_779_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) } - // Storage: Nfts Asset (r:2 w:2) + // Storage: Nfts Item (r:2 w:2) // Storage: Nfts PendingSwapOf (r:1 w:2) - // Storage: Nfts Class (r:1 w:0) + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:2 w:0) + // Storage: System Account (r:1 w:1) // Storage: Nfts Account (r:0 w:4) // Storage: Nfts ItemPriceOf (r:0 w:2) fn claim_swap() -> Weight { - Weight::from_ref_time(66_191_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(10 as u64)) + // Minimum execution time: 97_965 nanoseconds. + Weight::from_ref_time(98_699_000) + .saturating_add(RocksDbWeight::get().reads(8)) + .saturating_add(RocksDbWeight::get().writes(11)) } } From 6e651a89c777bbb6f2bb4a5953eabab7457f1dc9 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 28 Nov 2022 10:59:09 +0200 Subject: [PATCH 029/101] Update docs --- frame/nfts/src/lib.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index ea379c4a5aa0b..8e012f42aea3a 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -1230,16 +1230,20 @@ pub mod pallet { Self::do_clear_all_transfer_approvals(maybe_check_origin, collection, item) } - /// Disallows changing the metadata of attributes of the item. 
+ /// Disallows changing the metadata or attributes of the item. /// /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the /// `collection`. /// /// - `collection`: The collection if the `item`. /// - `item`: An item to be locked. - /// - `lock_config`: The config with the settings to be locked. + /// - `lock_metadata`: Specifies whether the metadata should be locked. + /// - `lock_attributes`: Specifies whether the attributes in the `CollectionOwner` namespace + /// should be locked. + /// + /// Note: `lock_attributes` affects the attributes in the `CollectionOwner` namespace + /// only. When the metadata or attributes are locked, it won't be possible to unlock them. /// - /// Note: when the metadata or attributes are locked, it won't be possible the unlock them. /// Emits `ItemPropertiesLocked`. /// /// Weight: `O(1)` From b051fd8138d72d17e2e967897fb620ee2d523cbc Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 28 Nov 2022 16:43:27 +0200 Subject: [PATCH 030/101] Typo --- frame/nfts/src/lib.rs | 4 ++-- frame/nfts/src/tests.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 8e012f42aea3a..21dd9b59de090 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -584,7 +584,7 @@ pub mod pallet { /// Some roles were not cleared. RolesNotCleared, /// Mint has not started yet. - MintNotStated, + MintNotStarted, /// Mint has already ended. MintEnded, /// The provided Item was already used for claiming. 
@@ -750,7 +750,7 @@ pub mod pallet { let now = frame_system::Pallet::::block_number(); if let Some(start_block) = mint_settings.start_block { - ensure!(start_block <= now, Error::::MintNotStated); + ensure!(start_block <= now, Error::::MintNotStarted); } if let Some(end_block) = mint_settings.end_block { ensure!(end_block >= now, Error::::MintEnded); diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 7cbd7ff6c36f7..998250dc39ef7 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -242,7 +242,7 @@ fn mint_should_work() { System::set_block_number(1); assert_noop!( Nfts::mint(RuntimeOrigin::signed(1), 0, 43, None), - Error::::MintNotStated + Error::::MintNotStarted ); System::set_block_number(4); assert_noop!(Nfts::mint(RuntimeOrigin::signed(1), 0, 43, None), Error::::MintEnded); From 0556185149004efe49c33edddb9e5d97cd579cae Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 28 Nov 2022 16:43:58 +0200 Subject: [PATCH 031/101] Fix benchmarks --- frame/nfts/src/benchmarking.rs | 12 ++++++++++++ frame/nfts/src/lib.rs | 4 +++- frame/nfts/src/weights.rs | 18 +++++++++++++++--- 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index 5e1b0237ca3ec..4e392b147c4e8 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -171,12 +171,24 @@ benchmarks_instance_pallet! { destroy { let n in 0 .. 1_000; + let m in 0 .. 1_000; + let a in 0 .. 
1_000; let (collection, caller, caller_lookup) = create_collection::(); add_collection_metadata::(); for i in 0..n { mint_item::(i as u16); + } + for i in 0..m { + if !Item::::contains_key(collection, T::Helper::item(i as u16)) { + mint_item::(i as u16); + } add_item_metadata::(T::Helper::item(i as u16)); + } + for i in 0..a { + if !Item::::contains_key(collection, T::Helper::item(i as u16)) { + mint_item::(i as u16); + } add_item_attribute::(T::Helper::item(i as u16)); } let witness = Collection::::get(collection).unwrap().destroy_witness(); diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 21dd9b59de090..0182c54ea41f3 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -1393,7 +1393,9 @@ pub mod pallet { /// - `delegate`: The previously approved account to remove. /// /// Emits `ItemAttributesApprovalRemoved` on success. - #[pallet::weight(T::WeightInfo::cancel_item_attributes_approval())] + #[pallet::weight(T::WeightInfo::cancel_item_attributes_approval( + witness.account_attributes + ))] pub fn cancel_item_attributes_approval( origin: OriginFor, collection: T::CollectionId, diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index 67f65b10e98e3..2965f335c229e 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -50,7 +50,7 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn create() -> Weight; fn force_create() -> Weight; - fn destroy(n: u32, ) -> Weight; + fn destroy(n: u32, m: u32, a: u32, ) -> Weight; fn mint() -> Weight; fn force_mint() -> Weight; fn burn() -> Weight; @@ -123,11 +123,17 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts ItemConfigOf (r:0 w:20) // Storage: Nfts Account (r:0 w:20) /// The range of component `n` is `[0, 1000]`. - fn destroy(n: u32, ) -> Weight { + /// The range of component `m` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy(n: u32, m: u32, a: u32, ) -> Weight { // Minimum execution time: 65_846 nanoseconds. 
Weight::from_ref_time(66_082_000) // Standard Error: 27_878 .saturating_add(Weight::from_ref_time(26_747_590).saturating_mul(n.into())) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(315_839 as u64).saturating_mul(m.into())) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(217_497 as u64).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -497,11 +503,17 @@ impl WeightInfo for () { // Storage: Nfts ItemConfigOf (r:0 w:20) // Storage: Nfts Account (r:0 w:20) /// The range of component `n` is `[0, 1000]`. - fn destroy(n: u32, ) -> Weight { + /// The range of component `m` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy(n: u32, m: u32, a: u32, ) -> Weight { // Minimum execution time: 65_846 nanoseconds. Weight::from_ref_time(66_082_000) // Standard Error: 27_878 .saturating_add(Weight::from_ref_time(26_747_590).saturating_mul(n.into())) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(315_839 as u64).saturating_mul(m.into())) + // Standard Error: 27_329 + .saturating_add(Weight::from_ref_time(217_497 as u64).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(3)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(5)) From 992601b0abe266a907b26009be0d3d32120cdfc1 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 28 Nov 2022 18:14:27 +0200 Subject: [PATCH 032/101] Add more docs --- frame/nfts/src/types.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index 5f13fb72eb33f..4cb92d692d7f7 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -66,6 +66,7 @@ pub trait Incrementable { } impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); +/// 
Information about the collection. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct CollectionDetails { /// Collection's owner. @@ -133,6 +134,7 @@ pub struct ItemDeposit { pub(super) amount: DepositBalance, } +/// Information about the collection's metadata. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] #[scale_info(skip_type_params(StringLimit))] #[codec(mel_bound(DepositBalance: MaxEncodedLen))] @@ -147,6 +149,7 @@ pub struct CollectionMetadata> { pub(super) data: BoundedVec, } +/// Information about the item's metadata. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] #[scale_info(skip_type_params(StringLimit))] #[codec(mel_bound(DepositBalance: MaxEncodedLen))] @@ -161,6 +164,7 @@ pub struct ItemMetadata> { pub(super) data: BoundedVec, } +/// Information about the tip. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ItemTip { /// A collection of the item. @@ -173,6 +177,7 @@ pub struct ItemTip { pub(super) amount: Amount, } +/// Information about the pending swap. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] pub struct PendingSwap { /// A collection of the item user wants to receive. @@ -194,12 +199,16 @@ pub struct AttributeDeposit { pub(super) amount: DepositBalance, } +/// Specifies whether the tokens will be sent or received. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub enum PriceDirection { + /// Tokens will be sent. Send, + /// Tokens will be received. Receive, } +/// Holds the details about the price. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct PriceWithDirection { /// An amount. @@ -259,6 +268,7 @@ pub enum MintType { HolderOf(CollectionId), } +/// Holds the information about minting. 
#[derive(Clone, Copy, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct MintSettings { /// Whether anyone can mint or if minters are restricted to some subset. @@ -285,18 +295,21 @@ impl Default for MintSettings { /// Marks an item as being used in order to claim another item. UsedToClaim(CollectionId), } +/// Collection's configuration. #[derive( Clone, Copy, Decode, Default, Encode, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo, )] @@ -358,6 +371,7 @@ impl ItemSettings { impl_codec_bitflags!(ItemSettings, u64, ItemSetting); +/// Item's configuration. #[derive( Encode, Decode, Default, PartialEq, RuntimeDebug, Clone, Copy, MaxEncodedLen, TypeInfo, )] From 5732492c6fbfc3180c190ddee353ce927150fcb9 Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Thu, 1 Dec 2022 18:13:11 +0100 Subject: [PATCH 033/101] Replace uniques with nfts, add minted volume storage --- Cargo.lock | 2 +- bin/node/runtime/src/lib.rs | 5 +- frame/nft-fractionalisation/Cargo.toml | 4 +- frame/nft-fractionalisation/src/lib.rs | 100 +++++++++++++++---------- 4 files changed, 66 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f6c62cfd17be2..43ec30b9a193a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5698,7 +5698,7 @@ dependencies = [ "log", "pallet-assets", "pallet-balances", - "pallet-uniques", + "pallet-nfts", "parity-scale-codec", "scale-info", "sp-core", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index b346acd1dc195..d0aa7afcea14f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1555,10 +1555,11 @@ impl pallet_nft_fractionalisation::Config for Runtime { type RuntimeEvent = RuntimeEvent; type PalletId = NftFractionsPalletId; type Currency = Balances; - type CollectionId = Uniques; - type ItemId = Uniques; + type CollectionId = ::CollectionId; + type ItemId = ::ItemId; type AssetBalance = ::Balance; type Assets = Assets; + type Items = Nfts; type AssetId = ::AssetId; } diff --git 
a/frame/nft-fractionalisation/Cargo.toml b/frame/nft-fractionalisation/Cargo.toml index c16d024b5c53f..1d5ab04c5cb88 100644 --- a/frame/nft-fractionalisation/Cargo.toml +++ b/frame/nft-fractionalisation/Cargo.toml @@ -18,7 +18,7 @@ frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-assets = { version = "4.0.0-dev", default-features = false, path = "../assets" } -pallet-uniques = { version = "4.0.0-dev", default-features = false, path = "../uniques" } +pallet-nfts = { version = "4.0.0-dev", default-features = false, path = "../nfts" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } @@ -37,7 +37,7 @@ std = [ "frame-system/std", "log/std", "pallet-assets/std", - "pallet-uniques/std", + "pallet-nfts/std", "scale-info/std", "sp-runtime/std", "sp-std/std", diff --git a/frame/nft-fractionalisation/src/lib.rs b/frame/nft-fractionalisation/src/lib.rs index 544b3c1a81d47..3b046126fd131 100644 --- a/frame/nft-fractionalisation/src/lib.rs +++ b/frame/nft-fractionalisation/src/lib.rs @@ -13,8 +13,8 @@ mod benchmarking; pub use scale_info::Type; -pub type ItemId = ::ItemId; -pub type CollectionId = ::CollectionId; +pub type ItemId = ::ItemId; +pub type CollectionId = ::CollectionId; #[frame_support::pallet] pub mod pallet { @@ -24,18 +24,24 @@ pub mod pallet { use frame_support::{ dispatch::DispatchResult, sp_runtime::traits::{ - AccountIdConversion, AtLeast32BitUnsigned, IntegerSquareRoot, StaticLookup, Zero, + AccountIdConversion, AtLeast32BitUnsigned, IntegerSquareRoot, Saturating, StaticLookup, + Zero, }, traits::{ fungibles::{ metadata::Mutate as MutateMetadata, Create, Inspect, InspectEnumerable, Mutate, Transfer, }, + 
tokens::nonfungibles_v2::{ + Inspect as NonFungiblesInspect, Transfer as NonFungiblesTransfer, + }, Currency, }, PalletId, }; + pub use pallet_nfts::Incrementable; + pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; pub type AssetIdOf = @@ -50,15 +56,16 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config + pallet_uniques::Config //+ pallet_assets::Config - { + pub trait Config: frame_system::Config { type RuntimeEvent: From> + IsType<::RuntimeEvent>; type Currency: Currency; - type CollectionId; + /// Identifier for the collection of item. + type CollectionId: Member + Parameter + MaxEncodedLen + Copy + Incrementable; - type ItemId; + /// The type used to identify a unique item within a collection. + type ItemId: Member + Parameter + MaxEncodedLen + Copy; type AssetBalance: AtLeast32BitUnsigned + codec::FullCodec @@ -90,6 +97,12 @@ pub mod pallet { + MutateMetadata + Transfer; + type Items: NonFungiblesInspect< + Self::AccountId, + ItemId = Self::ItemId, + CollectionId = Self::CollectionId, + > + NonFungiblesTransfer; + #[pallet::constant] type PalletId: Get; } @@ -99,15 +112,13 @@ pub mod pallet { // TODO: query amount minted from pallet assets instead of storing it locally. // Add a public getter function to pallet assets. 
pub type AssetsMinted = - StorageMap<_, Twox64Concat, AssetIdOf, BalanceOf, OptionQuery>; + StorageMap<_, Twox64Concat, AssetIdOf, AssetBalanceOf, OptionQuery>; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - NFTLocked( - ::CollectionId, - ::ItemId, - ), + PalletIdCreated(T::AccountId), + NFTLocked(T::CollectionId, T::ItemId), AssetCreated(AssetIdOf), AssetMinted(AssetIdOf, AssetBalanceOf), } @@ -122,8 +133,8 @@ pub mod pallet { #[pallet::weight(10_000 + T::DbWeight::get().writes(2).ref_time())] pub fn lock_nft_create_asset( origin: OriginFor, - collection_id: ::CollectionId, - item_id: ::ItemId, + collection_id: T::CollectionId, + item_id: T::ItemId, asset_id: AssetIdOf, beneficiary: T::AccountId, min_balance: AssetBalanceOf, @@ -131,29 +142,32 @@ pub mod pallet { ) -> DispatchResult { let _who = ensure_signed(origin.clone())?; let admin_account_id = Self::pallet_account_id(); - let admin = T::Lookup::unlookup(admin_account_id.clone()); - - match Self::do_lock_nft(origin.clone(), collection_id, item_id) { - Err(e) => return Err(e), - //Ok(()) => match Self::do_create_asset(origin.clone(), asset_id, admin, min_balance) - Ok(()) => match Self::do_create_asset(asset_id, admin_account_id, min_balance) { - Err(e) => return Err(e), - Ok(()) => match Self::do_mint_asset( - // Minting the asset is only possible from the pallet's origin. - // TODO: should the minting be possible from the owner's account? - asset_id, - &beneficiary, - amount, - ) { - Err(e) => return Err(e), - Ok(()) => { - Self::deposit_event(Event::NFTLocked(collection_id, item_id)); - Self::deposit_event(Event::AssetCreated(asset_id)); - Self::deposit_event(Event::AssetMinted(asset_id, amount)); + + Self::do_lock_nft(collection_id, item_id)?; + Self::do_create_asset(asset_id, admin_account_id, min_balance)?; + Self::do_mint_asset( + // Minting the asset is only possible from the pallet's origin. 
+ // TODO: should the minting be possible from the owner's account? + asset_id, + &beneficiary, + amount, + )?; + + >::try_mutate( + asset_id, + |assets_minted| -> Result<(), DispatchError> { + match assets_minted.is_some() { + true => { + *assets_minted = Some(assets_minted.unwrap().saturating_add(amount)) }, - }, + false => *assets_minted = Some(amount), + } + + Ok(()) }, - }; + )?; + + Self::deposit_event(Event::NFTLocked(collection_id, item_id)); Ok(()) } @@ -168,12 +182,11 @@ pub mod pallet { } fn do_lock_nft( - who: OriginFor, - collection_id: ::CollectionId, - item_id: ::ItemId, + collection_id: T::CollectionId, + item_id: T::ItemId, ) -> DispatchResult { - let pallet_id = T::Lookup::unlookup(Self::pallet_account_id()); - >::transfer(who, collection_id, item_id, pallet_id) + let admin_account_id = Self::pallet_account_id(); + T::Items::transfer(&collection_id, &item_id, &admin_account_id) } fn do_create_asset( @@ -191,5 +204,12 @@ pub mod pallet { ) -> DispatchResult { T::Assets::mint_into(asset_id, beneficiary, amount) } + + fn check_total_ownership(asset_id: AssetIdOf, account: &T::AccountId) -> () { + assert_eq!( + Some(T::Assets::balance(asset_id, account)), + >::get(asset_id) + ); + } } } From e907e15e0bda28b5c5db90e6a0bce3393fbc59f1 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Thu, 8 Dec 2022 16:48:24 +0200 Subject: [PATCH 034/101] DepositRequired setting should affect only the attributes within the CollectionOwner namespace --- frame/nfts/src/features/attributes.rs | 4 ++- frame/nfts/src/tests.rs | 51 +++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 1 deletion(-) diff --git a/frame/nfts/src/features/attributes.rs b/frame/nfts/src/features/attributes.rs index 48e9c31d2a9bb..da663d39a4ef5 100644 --- a/frame/nfts/src/features/attributes.rs +++ b/frame/nfts/src/features/attributes.rs @@ -74,7 +74,9 @@ impl, I: 'static> Pallet { attribute.map_or(AttributeDeposit { account: None, amount: Zero::zero() }, |m| m.1); let mut deposit = 
Zero::zero(); - if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) { + if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) || + namespace != AttributeNamespace::CollectionOwner + { deposit = T::DepositPerByte::get() .saturating_mul(((key.len() + value.len()) as u32).into()) .saturating_add(T::AttributeDepositBase::get()); diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 998250dc39ef7..1402ee9793fbe 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -938,6 +938,57 @@ fn set_external_account_attributes_should_work() { }); } +#[test] +fn validate_deposit_required_setting() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + Balances::make_free_balance_be(&3, 100); + + // with the disabled DepositRequired setting, only the collection's owner can set the + // attributes for free. + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 0, 2, default_item_config())); + assert_ok!(Nfts::approve_item_attributes(RuntimeOrigin::signed(2), 0, 0, 3)); + + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![1], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(3), + 0, + Some(0), + AttributeNamespace::Account(3), + bvec![2], + bvec![0], + )); + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + (Some(0), AttributeNamespace::ItemOwner, bvec![1], bvec![0]), + (Some(0), AttributeNamespace::Account(3), bvec![2], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::reserved_balance(2), 3); + 
assert_eq!(Balances::reserved_balance(3), 3); + }); +} + #[test] fn set_attribute_should_respect_lock() { new_test_ext().execute_with(|| { From 3ad4980c1ed5a056c1cdcdaa0d6a836559551534 Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Tue, 13 Dec 2022 19:23:08 +0100 Subject: [PATCH 035/101] Add unlock functionality --- bin/node/runtime/src/lib.rs | 2 + frame/nft-fractionalisation/src/lib.rs | 62 +++++++++++++++++++++----- 2 files changed, 53 insertions(+), 11 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index d0aa7afcea14f..08275acae7def 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1549,12 +1549,14 @@ impl pallet_nfts::Config for Runtime { parameter_types! { pub const NftFractionsPalletId: PalletId = PalletId(*b"fraction"); + pub const BuybackThreshold: u32 = 1; } impl pallet_nft_fractionalisation::Config for Runtime { type RuntimeEvent = RuntimeEvent; type PalletId = NftFractionsPalletId; type Currency = Balances; + type BuybackThreshold = BuybackThreshold; type CollectionId = ::CollectionId; type ItemId = ::ItemId; type AssetBalance = ::Balance; diff --git a/frame/nft-fractionalisation/src/lib.rs b/frame/nft-fractionalisation/src/lib.rs index 3b046126fd131..08f01ed6ca02a 100644 --- a/frame/nft-fractionalisation/src/lib.rs +++ b/frame/nft-fractionalisation/src/lib.rs @@ -24,8 +24,8 @@ pub mod pallet { use frame_support::{ dispatch::DispatchResult, sp_runtime::traits::{ - AccountIdConversion, AtLeast32BitUnsigned, IntegerSquareRoot, Saturating, StaticLookup, - Zero, + AccountIdConversion, AtLeast32BitUnsigned, IntegerSquareRoot, SaturatedConversion, + Saturating, StaticLookup, Zero, }, traits::{ fungibles::{ @@ -105,6 +105,9 @@ pub mod pallet { #[pallet::constant] type PalletId: Get; + + #[pallet::constant] + type BuybackThreshold: Get; } #[pallet::storage] @@ -114,6 +117,12 @@ pub mod pallet { pub type AssetsMinted = StorageMap<_, Twox64Concat, AssetIdOf, AssetBalanceOf, OptionQuery>; + 
#[pallet::storage] + #[pallet::getter(fn asset_to_nft)] + // TODO: store information about Asset ID and the corresponding Collection and Item ID. + pub type AssetToNft = + StorageMap<_, Twox64Concat, AssetIdOf, (T::CollectionId, T::ItemId), OptionQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -172,8 +181,22 @@ pub mod pallet { Ok(()) } - // TODO: return and burn 100% of the asset, unlock the NFT. - // pub fn burn_asset_unlock_nft() -> DispatchResult {} + // Return and burn a % of the asset, unlock the NFT. Currently 100% is the minimum threshold. + #[pallet::weight(10_000 + T::DbWeight::get().writes(2).ref_time())] + pub fn burn_asset_unlock_nft( + origin: OriginFor, + collection_id: T::CollectionId, + item_id: T::ItemId, + asset_id: AssetIdOf, + amount: AssetBalanceOf, + ) -> DispatchResult { + let who = ensure_signed(origin.clone())?; + + Self::do_burn_asset(asset_id, &who, amount)?; + Self::do_unlock_nft(collection_id, item_id, who)?; + + Ok(()) + } } impl Pallet { @@ -181,14 +204,15 @@ pub mod pallet { T::PalletId::get().into_account_truncating() } - fn do_lock_nft( - collection_id: T::CollectionId, - item_id: T::ItemId, - ) -> DispatchResult { + fn do_lock_nft(collection_id: T::CollectionId, item_id: T::ItemId) -> DispatchResult { let admin_account_id = Self::pallet_account_id(); T::Items::transfer(&collection_id, &item_id, &admin_account_id) } + fn do_unlock_nft(collection_id: T::CollectionId, item_id: T::ItemId, account: T::AccountId) -> DispatchResult { + T::Items::transfer(&collection_id, &item_id, &account) + } + fn do_create_asset( asset_id: AssetIdOf, admin: T::AccountId, @@ -205,10 +229,26 @@ pub mod pallet { T::Assets::mint_into(asset_id, beneficiary, amount) } - fn check_total_ownership(asset_id: AssetIdOf, account: &T::AccountId) -> () { + fn do_burn_asset( + asset_id: AssetIdOf, + account: &T::AccountId, + amount: AssetBalanceOf, + ) -> Result, DispatchError> { + 
Self::check_token_amount(asset_id, amount); + T::Assets::burn_from(asset_id, account, amount) + } + + fn check_token_amount( + asset_id: AssetIdOf, + amount: AssetBalanceOf, + ) -> () { + // TODO: create a threshold of tokens to return in order to get back the NFT. + // Otherwise one person can hold one token in order not to let others buy back. + let buyback_threshold: AssetBalanceOf = + T::BuybackThreshold::get().saturated_into::>(); assert_eq!( - Some(T::Assets::balance(asset_id, account)), - >::get(asset_id) + Some(amount), + Some(>::get(asset_id).unwrap().saturating_mul(buyback_threshold)) ); } } From 9f27e9140cab1d5ffcb023b1316ab61982ab49ec Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 13 Dec 2022 20:44:40 +0200 Subject: [PATCH 036/101] [NFTs] Implement missed methods to set the attributes from other pallets (#12919) * Implement missed methods to set the attributes from other pallets * Revert snapshots * Update snapshot * Update snapshot --- bin/node/runtime/src/lib.rs | 2 - frame/nfts/src/impl_nonfungibles.rs | 58 +++++++++++++++++++ frame/nfts/src/lib.rs | 8 +-- frame/nfts/src/mock.rs | 3 - frame/nfts/src/tests.rs | 12 +++- frame/support/src/lib.rs | 4 +- frame/support/src/traits/tokens/misc.rs | 3 +- ...ev_mode_without_arg_max_encoded_len.stderr | 2 +- ...age_ensure_span_are_ok_on_wrong_gen.stderr | 6 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 6 +- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 2 +- 12 files changed, 83 insertions(+), 25 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 6b94f03115c0f..bcf4b24ae122c 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1557,7 +1557,6 @@ impl pallet_uniques::Config for Runtime { parameter_types! 
{ pub Features: PalletFeatures = PalletFeatures::all_enabled(); - pub const NftsPalletId: PalletId = PalletId(*b"py/nfts_"); } impl pallet_nfts::Config for Runtime { @@ -1580,7 +1579,6 @@ impl pallet_nfts::Config for Runtime { type MaxDeadlineDuration = MaxDeadlineDuration; type Features = Features; type WeightInfo = pallet_nfts::weights::SubstrateWeight; - type PalletId = NftsPalletId; #[cfg(feature = "runtime-benchmarks")] type Helper = (); type CreateOrigin = AsEnsureOriginWithArg>; diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index 574d256a7705b..9fa696cd5c5c7 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -178,6 +178,64 @@ impl, I: 'static> Mutate<::AccountId, ItemConfig Ok(()) }) } + + fn set_attribute( + collection: &Self::CollectionId, + item: &Self::ItemId, + key: &[u8], + value: &[u8], + ) -> DispatchResult { + Self::do_force_set_attribute( + None, + *collection, + Some(*item), + AttributeNamespace::Pallet, + Self::construct_attribute_key(key.to_vec())?, + Self::construct_attribute_value(value.to_vec())?, + ) + } + + fn set_typed_attribute( + collection: &Self::CollectionId, + item: &Self::ItemId, + key: &K, + value: &V, + ) -> DispatchResult { + key.using_encoded(|k| { + value.using_encoded(|v| { + >::set_attribute(collection, item, k, v) + }) + }) + } + + fn set_collection_attribute( + collection: &Self::CollectionId, + key: &[u8], + value: &[u8], + ) -> DispatchResult { + Self::do_force_set_attribute( + None, + *collection, + None, + AttributeNamespace::Pallet, + Self::construct_attribute_key(key.to_vec())?, + Self::construct_attribute_value(value.to_vec())?, + ) + } + + fn set_typed_collection_attribute( + collection: &Self::CollectionId, + key: &K, + value: &V, + ) -> DispatchResult { + key.using_encoded(|k| { + value.using_encoded(|v| { + >::set_collection_attribute( + collection, k, v, + ) + }) + }) + } } impl, I: 'static> Transfer for Pallet { diff --git 
a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 0182c54ea41f3..6267df71a9a20 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -65,7 +65,7 @@ type AccountIdLookupOf = <::Lookup as StaticLookup>::Sourc #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{pallet_prelude::*, traits::ExistenceRequirement, PalletId}; + use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; use frame_system::pallet_prelude::*; #[pallet::pallet] @@ -171,10 +171,6 @@ pub mod pallet { #[pallet::constant] type Features: Get; - /// The pallet's id. - #[pallet::constant] - type PalletId: Get; - #[cfg(feature = "runtime-benchmarks")] /// A set of helper functions for benchmarking. type Helper: BenchmarkHelper; @@ -782,7 +778,7 @@ pub mod pallet { let key = ( &collection_id, Some(owner_of_item), - AttributeNamespace::Pallet(T::PalletId::get()), + AttributeNamespace::Pallet, &attribute_key, ); let already_claimed = Attribute::::contains_key(key.clone()); diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs index 78aebb9471481..f814b209d5f78 100644 --- a/frame/nfts/src/mock.rs +++ b/frame/nfts/src/mock.rs @@ -23,7 +23,6 @@ use crate as pallet_nfts; use frame_support::{ construct_runtime, parameter_types, traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, - PalletId, }; use sp_core::H256; use sp_runtime::{ @@ -87,7 +86,6 @@ impl pallet_balances::Config for Test { parameter_types! 
{ pub storage Features: PalletFeatures = PalletFeatures::all_enabled(); - pub const NftsPalletId: PalletId = PalletId(*b"py/nfts_"); } impl Config for Test { @@ -112,7 +110,6 @@ impl Config for Test { type MaxDeadlineDuration = ConstU64<10000>; type Features = Features; type WeightInfo = (); - type PalletId = NftsPalletId; #[cfg(feature = "runtime-benchmarks")] type Helper = (); } diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 1402ee9793fbe..e02e77ebe7dce 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -22,7 +22,10 @@ use enumflags2::BitFlags; use frame_support::{ assert_noop, assert_ok, dispatch::Dispatchable, - traits::{tokens::nonfungibles_v2::Destroy, Currency, Get}, + traits::{ + tokens::nonfungibles_v2::{Destroy, Mutate}, + Currency, Get, + }, }; use pallet_balances::Error as BalancesError; use sp_core::bounded::BoundedVec; @@ -975,12 +978,19 @@ fn validate_deposit_required_setting() { bvec![2], bvec![0], )); + assert_ok!(::AccountId, ItemConfig>>::set_attribute( + &0, + &0, + &[3], + &[0], + )); assert_eq!( attributes(0), vec![ (Some(0), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), (Some(0), AttributeNamespace::ItemOwner, bvec![1], bvec![0]), (Some(0), AttributeNamespace::Account(3), bvec![2], bvec![0]), + (Some(0), AttributeNamespace::Pallet, bvec![3], bvec![0]), ] ); assert_eq!(Balances::reserved_balance(1), 0); diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index f11e33b669be0..efecbb75f9c62 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -115,7 +115,7 @@ pub use sp_runtime::{ self, print, traits::Printable, ConsensusEngineId, MAX_MODULE_ERROR_ENCODED_SIZE, }; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::TypeId; @@ -127,7 +127,7 @@ pub const LOG_TARGET: &str = "runtime::frame-support"; pub enum Never {} /// A pallet identifier. 
These are per pallet and should be stored in a registry somewhere. -#[derive(Clone, Copy, Debug, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[derive(Clone, Copy, Eq, PartialEq, Encode, Decode, TypeInfo)] pub struct PalletId(pub [u8; 8]); impl TypeId for PalletId { diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs index f0b172841aa84..f9876ef477b81 100644 --- a/frame/support/src/traits/tokens/misc.rs +++ b/frame/support/src/traits/tokens/misc.rs @@ -17,7 +17,6 @@ //! Miscellaneous types. -use crate::PalletId; use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; use sp_core::RuntimeDebug; @@ -133,7 +132,7 @@ pub enum BalanceStatus { )] pub enum AttributeNamespace { /// An attribute was set by the pallet. - Pallet(PalletId), + Pallet, /// An attribute was set by collection's owner. CollectionOwner, /// An attribute was set by item's owner. diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr index bb49e11679028..a5ec31a9bb4e7 100644 --- a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 80 others + and 78 others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>` to implement `StorageInfoTrait` diff --git 
a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 999d8585c221a..42ef5a34e4c30 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 278 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 162 others + and 161 others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 278 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index e2870ffb9e86f..461d63ebb0d9c 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ 
b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 278 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 162 others + and 161 others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 278 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index d5b0c3b50a5ac..cce9fa70b3da5 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, 
TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 80 others + and 78 others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageInfoTrait` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 6b174d13c5778..877485dda2084 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 80 others + and 78 others = note: required for `Key` to implement `KeyGeneratorMaxEncodedLen` = note: required for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` to implement `StorageInfoTrait` From ea37f258cdb18e01458fac535dada63b0db876d3 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Wed, 14 Dec 2022 09:52:25 +0200 Subject: [PATCH 037/101] Revert snapshot changes --- .../storage_ensure_span_are_ok_on_wrong_gen.stderr | 6 +++--- .../storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 42ef5a34e4c30..999d8585c221a 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ 
b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 161 others + and 162 others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 461d63ebb0d9c..e2870ffb9e86f 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required for `Bar` to implement `FullEncode` 
= note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 161 others + and 162 others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` From c057f02aee215d7ca2434b2dcfff792621c2e735 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Wed, 14 Dec 2022 14:37:14 +0200 Subject: [PATCH 038/101] Update snapshots --- .../storage_ensure_span_are_ok_on_wrong_gen.stderr | 6 +++--- .../storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr | 6 +++--- .../test/tests/pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../tests/pallet_ui/storage_info_unsatisfied_nmap.stderr | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 999d8585c221a..ac5a1f46f8a6b 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> 
<&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 280 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 162 others + and 163 others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 280 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index e2870ffb9e86f..e0c1609403c3a 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 280 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement 
`PartialStorageInfoTrait` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 162 others + and 163 others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 280 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index cce9fa70b3da5..b5443c6f327e4 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 78 others + and 79 others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageInfoTrait` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 877485dda2084..afc7aaa8768cf 100644 --- 
a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 78 others + and 79 others = note: required for `Key` to implement `KeyGeneratorMaxEncodedLen` = note: required for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` to implement `StorageInfoTrait` From 5aaa37f649063514766ce25ba62b6a2b56a1ca57 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Wed, 14 Dec 2022 14:52:32 +0200 Subject: [PATCH 039/101] Yet another snapshot update.. --- .../tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr index a5ec31a9bb4e7..91bab62e2d353 100644 --- a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 78 others + and 79 others = note: required for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>` to implement `StorageInfoTrait` From 0482d19d8541d6d0d9e8c8f9b019fb0ccd954058 Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Wed, 14 Dec 2022 19:55:21 +0100 Subject: [PATCH 040/101] Asset to NFT id storage mutations --- frame/nft-fractionalisation/src/lib.rs | 73 ++++++++++++++++++++------ 1 file changed, 57 insertions(+), 16 deletions(-) diff --git a/frame/nft-fractionalisation/src/lib.rs b/frame/nft-fractionalisation/src/lib.rs index 08f01ed6ca02a..704e31e792ee8 100644 --- a/frame/nft-fractionalisation/src/lib.rs +++ b/frame/nft-fractionalisation/src/lib.rs @@ -119,17 +119,15 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn asset_to_nft)] - // TODO: store information about Asset ID and the corresponding Collection and Item ID. + // TODO: store information about Asset ID and the corresponding Collection and Item ID. pub type AssetToNft = StorageMap<_, Twox64Concat, AssetIdOf, (T::CollectionId, T::ItemId), OptionQuery>; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - PalletIdCreated(T::AccountId), NFTLocked(T::CollectionId, T::ItemId), - AssetCreated(AssetIdOf), - AssetMinted(AssetIdOf, AssetBalanceOf), + NFTUnlocked(T::CollectionId, T::ItemId), } #[pallet::error] @@ -155,13 +153,12 @@ pub mod pallet { Self::do_lock_nft(collection_id, item_id)?; Self::do_create_asset(asset_id, admin_account_id, min_balance)?; Self::do_mint_asset( - // Minting the asset is only possible from the pallet's origin. - // TODO: should the minting be possible from the owner's account? asset_id, &beneficiary, amount, )?; + // Mutate this storage item to retain information about the amount minted. >::try_mutate( asset_id, |assets_minted| -> Result<(), DispatchError> { @@ -176,24 +173,42 @@ pub mod pallet { }, )?; + // Mutate this storage item to retain information about the asset created. 
+ >::try_mutate(asset_id, |nft_id| -> Result<(), DispatchError> { + *nft_id = Some((collection_id, item_id)); + + Ok(()) + })?; + Self::deposit_event(Event::NFTLocked(collection_id, item_id)); Ok(()) } - // Return and burn a % of the asset, unlock the NFT. Currently 100% is the minimum threshold. + /// Return and burn a % of the asset, unlock the NFT. Currently 100% is the minimum threshold. #[pallet::weight(10_000 + T::DbWeight::get().writes(2).ref_time())] pub fn burn_asset_unlock_nft( origin: OriginFor, - collection_id: T::CollectionId, - item_id: T::ItemId, asset_id: AssetIdOf, amount: AssetBalanceOf, ) -> DispatchResult { let who = ensure_signed(origin.clone())?; + let (collection_id, item_id) = Self::get_nft_id(asset_id); Self::do_burn_asset(asset_id, &who, amount)?; - Self::do_unlock_nft(collection_id, item_id, who)?; + + // Mutate this storage item to retain information about the amount burned. + >::try_mutate( + asset_id, + |assets_minted| -> Result<(), DispatchError> { + *assets_minted = Some(assets_minted.unwrap().saturating_sub(amount)); + Ok(()) + }, + )?; + + Self::do_unlock_nft(collection_id, item_id, asset_id, who)?; + + Self::deposit_event(Event::NFTUnlocked(collection_id, item_id)); Ok(()) } @@ -204,15 +219,37 @@ pub mod pallet { T::PalletId::get().into_account_truncating() } + /// Transfer the NFT from the account locking the NFT to the pallet's account. fn do_lock_nft(collection_id: T::CollectionId, item_id: T::ItemId) -> DispatchResult { let admin_account_id = Self::pallet_account_id(); T::Items::transfer(&collection_id, &item_id, &admin_account_id) } - fn do_unlock_nft(collection_id: T::CollectionId, item_id: T::ItemId, account: T::AccountId) -> DispatchResult { - T::Items::transfer(&collection_id, &item_id, &account) + /// Transfer the NFT to the account returning the tokens. Remove the key and value from + /// storage. 
+ fn do_unlock_nft( + collection_id: T::CollectionId, + item_id: T::ItemId, + asset_id: AssetIdOf, + account: T::AccountId, + ) -> DispatchResult { + match T::Items::transfer(&collection_id, &item_id, &account) { + Ok(()) => { + >::take(asset_id); + return Ok(()) + }, + Err(e) => return Err(e), + } + } + + /// Assert that the `asset_id` was created by means of locking an NFT and fetch + /// its `CollectionId` and `ItemId`. + fn get_nft_id(asset_id: AssetIdOf) -> (T::CollectionId, T::ItemId) { + assert_eq!(>::contains_key(asset_id), true); + >::get(asset_id).unwrap() } + /// Create the new asset. fn do_create_asset( asset_id: AssetIdOf, admin: T::AccountId, @@ -221,6 +258,7 @@ pub mod pallet { T::Assets::create(asset_id, admin, true, min_balance) } + /// Mint the `amount` of tokens with `asset_id` into the beneficiary's account. fn do_mint_asset( asset_id: AssetIdOf, beneficiary: &T::AccountId, @@ -229,19 +267,22 @@ pub mod pallet { T::Assets::mint_into(asset_id, beneficiary, amount) } + /// If the amount of tokens is enough for the buyback, burn the tokens from the + /// account that is returning the tokens. fn do_burn_asset( asset_id: AssetIdOf, account: &T::AccountId, amount: AssetBalanceOf, ) -> Result, DispatchError> { + // Assert that the asset exists in storage. + assert_eq!(>::contains_key(asset_id), true); Self::check_token_amount(asset_id, amount); T::Assets::burn_from(asset_id, account, amount) } - fn check_token_amount( - asset_id: AssetIdOf, - amount: AssetBalanceOf, - ) -> () { + /// Assert that the amount of tokens returned is equal to the amount needed to buy + /// back the locked NFT. + fn check_token_amount(asset_id: AssetIdOf, amount: AssetBalanceOf) -> () { // TODO: create a threshold of tokens to return in order to get back the NFT. // Otherwise one person can hold one token in order not to let others buy back. 
let buyback_threshold: AssetBalanceOf = From 6fea29339fd63c8aaa1152c15e817706a136ae6c Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Wed, 14 Dec 2022 20:08:54 +0100 Subject: [PATCH 041/101] Minor fixes --- client/executor/src/wasm_runtime.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index a197407dda240..090ca99b693d4 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -330,7 +330,6 @@ where }, ) .map(|runtime| -> Arc { Arc::new(runtime) }) - }, } } From 0f9f4a967d5ce5e4e7356dbc0a1150f6672c76a0 Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Wed, 14 Dec 2022 20:38:47 +0100 Subject: [PATCH 042/101] Minor comments --- frame/nft-fractionalisation/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/frame/nft-fractionalisation/src/lib.rs b/frame/nft-fractionalisation/src/lib.rs index 704e31e792ee8..c9dd822e7a5d6 100644 --- a/frame/nft-fractionalisation/src/lib.rs +++ b/frame/nft-fractionalisation/src/lib.rs @@ -138,6 +138,8 @@ pub mod pallet { #[pallet::call] impl Pallet { #[pallet::weight(10_000 + T::DbWeight::get().writes(2).ref_time())] + /// Pallet's account must be funded before lock is possible! 
+ /// 5EYCAe5gjC5dxKPbV2GPQUetETjFNSYZsSwSurVTTXidSLbh pub fn lock_nft_create_asset( origin: OriginFor, collection_id: T::CollectionId, From 6814a94010ca18329b0e9dee699bc042e1a4e973 Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Thu, 15 Dec 2022 12:20:38 +0100 Subject: [PATCH 043/101] cargo fmt --- bin/node-template/node/src/command.rs | 12 +- bin/node-template/node/src/service.rs | 7 +- bin/node/bench/src/core.rs | 8 +- bin/node/bench/src/main.rs | 2 +- bin/node/bench/src/simple_trie.rs | 2 +- bin/node/bench/src/tempdb.rs | 5 +- bin/node/cli/src/command.rs | 17 +- bin/node/cli/src/service.rs | 2 +- bin/node/cli/tests/common.rs | 4 +- bin/node/cli/tests/telemetry.rs | 2 +- bin/node/inspect/src/lib.rs | 6 +- bin/node/runtime/src/impls.rs | 6 +- bin/node/runtime/src/lib.rs | 28 +-- bin/node/testing/src/bench.rs | 18 +- client/allocator/src/freeing_bump.rs | 10 +- client/api/src/backend.rs | 2 +- client/api/src/execution_extensions.rs | 11 +- client/api/src/in_mem.rs | 20 +- client/api/src/leaves.rs | 6 +- client/api/src/lib.rs | 6 +- client/api/src/notifications/registry.rs | 9 +- client/authority-discovery/src/worker.rs | 10 +- .../src/worker/addr_cache.rs | 4 +- .../basic-authorship/src/basic_authorship.rs | 26 +-- client/beefy/rpc/src/lib.rs | 2 +- client/beefy/src/communication/gossip.rs | 8 +- .../incoming_requests_handler.rs | 4 +- .../outgoing_requests_engine.rs | 6 +- client/beefy/src/justification.rs | 8 +- client/beefy/src/round.rs | 6 +- client/beefy/src/worker.rs | 10 +- client/chain-spec/derive/src/impls.rs | 4 +- client/cli/src/arg_enums.rs | 35 ++- client/cli/src/commands/purge_chain_cmd.rs | 2 +- client/cli/src/commands/run_cmd.rs | 10 +- client/cli/src/commands/vanity.rs | 6 +- client/cli/src/commands/verify.rs | 2 +- client/cli/src/config.rs | 2 +- client/cli/src/params/network_params.rs | 9 +- client/cli/src/params/node_key_params.rs | 8 +- client/cli/src/params/shared_params.rs | 5 +- client/consensus/aura/src/import_queue.rs | 5 +- 
client/consensus/aura/src/lib.rs | 11 +- client/consensus/babe/src/authorship.rs | 18 +- client/consensus/babe/src/aux_schema.rs | 24 +- client/consensus/babe/src/lib.rs | 70 +++--- client/consensus/babe/src/tests.rs | 2 +- client/consensus/babe/src/verification.rs | 8 +- client/consensus/common/src/block_import.rs | 11 +- client/consensus/common/src/import_queue.rs | 11 +- .../common/src/import_queue/basic_queue.rs | 17 +- .../common/src/import_queue/buffered_link.rs | 15 +- client/consensus/epochs/src/lib.rs | 121 ++++------ .../manual-seal/src/consensus/babe.rs | 7 +- .../consensus/manual-seal/src/seal_block.rs | 9 +- client/consensus/pow/src/lib.rs | 41 ++-- client/consensus/pow/src/worker.rs | 8 +- client/consensus/slots/src/aux_schema.rs | 6 +- client/consensus/slots/src/lib.rs | 30 +-- client/consensus/slots/src/slots.rs | 4 +- client/db/src/bench.rs | 6 +- client/db/src/lib.rs | 135 ++++++----- client/db/src/parity_db.rs | 18 +- client/db/src/upgrade.rs | 5 +- client/db/src/utils.rs | 33 ++- client/executor/benches/bench.rs | 2 +- .../runtime_blob/data_segments_snapshot.rs | 4 +- .../common/src/runtime_blob/runtime_blob.rs | 9 +- client/executor/common/src/sandbox.rs | 20 +- .../common/src/sandbox/wasmer_backend.rs | 19 +- client/executor/runtime-test/src/lib.rs | 4 +- client/executor/src/native_executor.rs | 2 +- client/executor/src/wasm_runtime.rs | 2 +- client/executor/wasmi/src/lib.rs | 21 +- client/executor/wasmtime/src/host.rs | 11 +- client/executor/wasmtime/src/imports.rs | 9 +- .../executor/wasmtime/src/instance_wrapper.rs | 12 +- client/executor/wasmtime/src/runtime.rs | 26 +-- client/executor/wasmtime/src/util.rs | 4 +- client/finality-grandpa/src/authorities.rs | 41 ++-- client/finality-grandpa/src/aux_schema.rs | 25 +-- .../src/communication/gossip.rs | 158 +++++++------ .../finality-grandpa/src/communication/mod.rs | 51 ++--- .../src/communication/periodic.rs | 4 +- .../src/communication/tests.rs | 12 +- 
client/finality-grandpa/src/environment.rs | 62 +++--- client/finality-grandpa/src/finality_proof.rs | 12 +- client/finality-grandpa/src/import.rs | 36 ++- client/finality-grandpa/src/justification.rs | 19 +- client/finality-grandpa/src/lib.rs | 8 +- client/finality-grandpa/src/observer.rs | 10 +- client/finality-grandpa/src/tests.rs | 8 +- client/finality-grandpa/src/until_imported.rs | 22 +- client/finality-grandpa/src/voting_rule.rs | 14 +- client/finality-grandpa/src/warp_proof.rs | 12 +- client/informant/src/display.rs | 20 +- client/keystore/src/lib.rs | 5 +- client/keystore/src/local.rs | 23 +- client/network-gossip/src/bridge.rs | 14 +- client/network-gossip/src/state_machine.rs | 19 +- client/network/bitswap/src/lib.rs | 14 +- client/network/common/src/config.rs | 5 +- .../network/common/src/service/signature.rs | 4 +- client/network/common/src/utils.rs | 2 +- .../src/light_client_requests/handler.rs | 24 +- client/network/src/behaviour.rs | 50 ++--- client/network/src/discovery.rs | 72 +++--- client/network/src/network_state.rs | 10 +- client/network/src/peer_info.rs | 36 ++- client/network/src/protocol.rs | 96 ++++---- client/network/src/protocol/message.rs | 7 +- .../src/protocol/notifications/behaviour.rs | 104 +++++---- .../src/protocol/notifications/handler.rs | 73 +++--- .../src/protocol/notifications/tests.rs | 22 +- .../notifications/upgrade/notifications.rs | 57 +++-- client/network/src/request_responses.rs | 63 +++--- client/network/src/service.rs | 102 ++++----- client/network/src/service/out_events.rs | 10 +- client/network/src/service/tests/service.rs | 41 ++-- .../network/sync/src/block_request_handler.rs | 19 +- client/network/sync/src/blocks.rs | 30 ++- client/network/sync/src/extra_requests.rs | 22 +- client/network/sync/src/lib.rs | 209 +++++++++--------- client/network/sync/src/state.rs | 8 +- client/network/sync/src/warp.rs | 9 +- client/network/test/src/lib.rs | 15 +- client/network/test/src/sync.rs | 50 ++--- 
client/network/transactions/src/lib.rs | 19 +- client/offchain/src/api.rs | 5 +- client/offchain/src/api/http.rs | 69 +++--- client/offchain/src/api/timestamp.rs | 5 +- client/peerset/src/lib.rs | 63 +++--- client/peerset/src/peersstate.rs | 34 ++- client/peerset/tests/fuzz.rs | 19 +- client/rpc-api/src/chain/error.rs | 5 +- client/rpc-api/src/dev/error.rs | 20 +- client/rpc-api/src/state/error.rs | 10 +- client/rpc-api/src/system/error.rs | 5 +- client/rpc-spec-v2/src/transaction/error.rs | 35 ++- client/rpc-spec-v2/src/transaction/event.rs | 45 ++-- .../src/transaction/transaction.rs | 22 +- client/rpc/src/author/mod.rs | 4 +- client/rpc/src/dev/mod.rs | 4 +- client/rpc/src/state/mod.rs | 4 +- client/rpc/src/state/state_full.rs | 35 ++- client/rpc/src/system/tests.rs | 12 +- client/service/src/builder.rs | 18 +- client/service/src/chain_ops/export_blocks.rs | 9 +- client/service/src/chain_ops/import_blocks.rs | 25 +-- client/service/src/client/block_rules.rs | 4 +- client/service/src/client/client.rs | 120 +++++----- client/service/src/client/wasm_override.rs | 4 +- client/service/src/lib.rs | 12 +- client/service/test/src/lib.rs | 2 +- client/state-db/src/lib.rs | 53 ++--- client/state-db/src/noncanonical.rs | 28 +-- client/state-db/src/pruning.rs | 4 +- client/sysinfo/src/sysinfo.rs | 12 +- client/telemetry/src/endpoints.rs | 2 +- client/telemetry/src/lib.rs | 6 +- client/telemetry/src/node.rs | 14 +- client/telemetry/src/transport.rs | 2 +- client/tracing/proc-macro/src/lib.rs | 2 +- client/tracing/src/block/mod.rs | 10 +- client/tracing/src/lib.rs | 20 +- client/tracing/src/logging/event_format.rs | 15 +- .../src/logging/layers/prefix_layer.rs | 4 +- client/tracing/src/logging/stderr_writer.rs | 4 +- .../transaction-pool/src/enactment_state.rs | 6 +- .../transaction-pool/src/graph/base_pool.rs | 24 +- client/transaction-pool/src/graph/future.rs | 4 +- client/transaction-pool/src/graph/pool.rs | 17 +- client/transaction-pool/src/graph/ready.rs | 20 +- 
client/transaction-pool/src/graph/rotator.rs | 2 +- .../src/graph/validated_pool.rs | 22 +- client/transaction-pool/src/lib.rs | 29 ++- client/transaction-pool/src/revalidation.rs | 2 +- client/utils/src/mpsc.rs | 2 +- client/utils/src/status_sinks.rs | 2 +- frame/alliance/src/lib.rs | 2 +- frame/assets/src/functions.rs | 36 +-- frame/assets/src/impl_stored_map.rs | 2 +- frame/assets/src/lib.rs | 2 +- frame/assets/src/types.rs | 2 +- frame/aura/src/lib.rs | 4 +- frame/authorship/src/lib.rs | 29 ++- frame/babe/src/equivocation.rs | 2 +- frame/babe/src/lib.rs | 8 +- frame/babe/src/tests.rs | 6 +- frame/bags-list/remote-tests/src/lib.rs | 2 +- frame/bags-list/src/list/mod.rs | 52 ++--- frame/bags-list/src/migrations.rs | 2 +- frame/balances/src/lib.rs | 97 ++++---- frame/beefy-mmr/primitives/src/lib.rs | 4 +- frame/beefy/src/lib.rs | 4 +- frame/benchmarking/src/analysis.rs | 14 +- frame/benchmarking/src/lib.rs | 2 +- frame/benchmarking/src/tests.rs | 2 +- frame/bounties/src/benchmarking.rs | 4 +- frame/bounties/src/lib.rs | 26 +-- frame/bounties/src/migrations/v4.rs | 2 +- frame/child-bounties/src/benchmarking.rs | 4 +- frame/child-bounties/src/lib.rs | 24 +- frame/collective/src/lib.rs | 8 +- frame/collective/src/migrations/v4.rs | 6 +- frame/contracts/primitives/src/lib.rs | 20 +- frame/contracts/proc-macro/src/lib.rs | 8 +- frame/contracts/src/benchmarking/code.rs | 10 +- frame/contracts/src/benchmarking/mod.rs | 12 +- frame/contracts/src/exec.rs | 33 ++- frame/contracts/src/lib.rs | 7 +- frame/contracts/src/migration.rs | 2 +- frame/contracts/src/schedule.rs | 43 ++-- frame/contracts/src/storage.rs | 19 +- frame/contracts/src/storage/meter.rs | 30 ++- frame/contracts/src/tests.rs | 4 +- frame/contracts/src/wasm/mod.rs | 10 +- frame/contracts/src/wasm/prepare.rs | 61 +++-- frame/contracts/src/wasm/runtime.rs | 34 ++- frame/conviction-voting/src/lib.rs | 7 +- frame/conviction-voting/src/tests.rs | 10 +- frame/conviction-voting/src/vote.rs | 20 +- 
frame/democracy/src/lib.rs | 10 +- frame/democracy/src/migrations.rs | 12 +- frame/democracy/src/vote.rs | 10 +- frame/democracy/src/vote_threshold.rs | 20 +- .../election-provider-multi-phase/src/lib.rs | 8 +- .../election-provider-multi-phase/src/mock.rs | 16 +- .../src/signed.rs | 6 +- .../src/unsigned.rs | 31 ++- .../solution-type/src/lib.rs | 8 +- .../solution-type/src/single_page.rs | 2 +- frame/election-provider-support/src/lib.rs | 5 +- frame/election-provider-support/src/mock.rs | 2 +- .../election-provider-support/src/onchain.rs | 2 +- frame/elections-phragmen/src/lib.rs | 8 +- frame/elections-phragmen/src/migrations/v4.rs | 2 +- frame/examples/basic/src/lib.rs | 2 +- frame/examples/offchain-worker/src/lib.rs | 33 ++- frame/executive/src/lib.rs | 29 ++- frame/fast-unstake/src/lib.rs | 19 +- frame/fast-unstake/src/mock.rs | 4 +- frame/gilt/src/lib.rs | 6 +- frame/grandpa/src/equivocation.rs | 2 +- frame/grandpa/src/lib.rs | 10 +- frame/grandpa/src/migrations/v4.rs | 2 +- frame/grandpa/src/tests.rs | 4 +- frame/identity/src/benchmarking.rs | 18 +- frame/identity/src/lib.rs | 16 +- frame/identity/src/types.rs | 5 +- frame/im-online/src/lib.rs | 29 ++- frame/im-online/src/tests.rs | 15 +- frame/lottery/src/lib.rs | 10 +- frame/membership/src/lib.rs | 2 +- frame/membership/src/migrations/v4.rs | 6 +- frame/merkle-mountain-range/rpc/src/lib.rs | 4 +- frame/merkle-mountain-range/src/lib.rs | 12 +- frame/merkle-mountain-range/src/mmr/mmr.rs | 4 +- .../merkle-mountain-range/src/mmr/storage.rs | 6 +- frame/merkle-mountain-range/src/mmr/utils.rs | 4 +- frame/multisig/src/migrations.rs | 2 +- frame/nft-fractionalisation/src/lib.rs | 16 +- frame/nfts/src/features/metadata.rs | 20 +- frame/nfts/src/features/settings.rs | 2 +- frame/nfts/src/features/transfer.rs | 4 +- frame/nfts/src/impl_nonfungibles.rs | 10 +- frame/nfts/src/lib.rs | 4 +- frame/nfts/src/tests.rs | 6 +- frame/node-authorization/src/lib.rs | 4 +- frame/nomination-pools/fuzzer/src/call.rs | 8 +- 
frame/nomination-pools/src/lib.rs | 45 ++-- frame/nomination-pools/src/migration.rs | 10 +- frame/preimage/src/lib.rs | 17 +- frame/preimage/src/migration.rs | 18 +- frame/proxy/src/lib.rs | 22 +- frame/ranked-collective/src/lib.rs | 7 +- frame/ranked-collective/src/tests.rs | 10 +- frame/referenda/src/branch.rs | 22 +- frame/referenda/src/lib.rs | 18 +- frame/referenda/src/types.rs | 25 +-- frame/scheduler/src/benchmarking.rs | 6 +- frame/scheduler/src/lib.rs | 31 ++- frame/scheduler/src/migration.rs | 6 +- frame/scheduler/src/tests.rs | 36 +-- frame/scored-pool/src/lib.rs | 10 +- frame/session/src/historical/mod.rs | 6 +- frame/session/src/lib.rs | 8 +- frame/session/src/migrations/v1.rs | 6 +- frame/session/src/mock.rs | 4 +- frame/society/src/lib.rs | 18 +- frame/staking/reward-curve/src/lib.rs | 30 +-- frame/staking/reward-curve/src/log.rs | 4 +- frame/staking/reward-fn/src/lib.rs | 16 +- frame/staking/src/benchmarking.rs | 2 +- frame/staking/src/inflation.rs | 4 +- frame/staking/src/lib.rs | 8 +- frame/staking/src/migrations.rs | 6 +- frame/staking/src/mock.rs | 4 +- frame/staking/src/pallet/impls.rs | 45 ++-- frame/staking/src/pallet/mod.rs | 55 ++--- frame/staking/src/slashing.rs | 10 +- frame/staking/src/tests.rs | 12 +- frame/state-trie-migration/src/lib.rs | 14 +- .../support/procedural/src/clone_no_bound.rs | 2 +- .../src/construct_runtime/expand/event.rs | 2 +- .../src/construct_runtime/expand/origin.rs | 2 +- .../procedural/src/construct_runtime/mod.rs | 12 +- .../procedural/src/construct_runtime/parse.rs | 77 ++++--- frame/support/procedural/src/crate_version.rs | 2 +- .../support/procedural/src/debug_no_bound.rs | 2 +- .../procedural/src/dummy_part_checker.rs | 2 +- frame/support/procedural/src/key_prefix.rs | 2 +- .../procedural/src/match_and_insert.rs | 10 +- .../procedural/src/pallet/expand/error.rs | 2 +- .../procedural/src/pallet/expand/event.rs | 2 +- .../src/pallet/expand/genesis_build.rs | 2 +- .../src/pallet/expand/genesis_config.rs | 8 +- 
.../procedural/src/pallet/expand/storage.rs | 14 +- frame/support/procedural/src/pallet/mod.rs | 2 +- .../procedural/src/pallet/parse/call.rs | 29 ++- .../procedural/src/pallet/parse/config.rs | 30 ++- .../procedural/src/pallet/parse/error.rs | 8 +- .../procedural/src/pallet/parse/event.rs | 6 +- .../src/pallet/parse/extra_constants.rs | 16 +- .../src/pallet/parse/genesis_build.rs | 2 +- .../src/pallet/parse/genesis_config.rs | 6 +- .../procedural/src/pallet/parse/helper.rs | 18 +- .../procedural/src/pallet/parse/hooks.rs | 4 +- .../procedural/src/pallet/parse/inherent.rs | 8 +- .../procedural/src/pallet/parse/mod.rs | 51 ++--- .../procedural/src/pallet/parse/origin.rs | 6 +- .../src/pallet/parse/pallet_struct.rs | 8 +- .../procedural/src/pallet/parse/storage.rs | 84 ++++--- .../procedural/src/pallet/parse/type_value.rs | 12 +- .../src/pallet/parse/validate_unsigned.rs | 8 +- frame/support/procedural/src/pallet_error.rs | 22 +- .../procedural/src/partial_eq_no_bound.rs | 2 +- .../genesis_config/genesis_config_def.rs | 2 +- frame/support/procedural/src/storage/mod.rs | 32 ++- frame/support/procedural/src/storage/parse.rs | 28 +-- .../src/storage/print_pallet_upgrade.rs | 4 +- frame/support/procedural/src/storage_alias.rs | 24 +- frame/support/procedural/tools/src/syn_ext.rs | 2 +- frame/support/src/dispatch.rs | 24 +- frame/support/src/hash.rs | 4 +- frame/support/src/lib.rs | 2 +- frame/support/src/storage/child.rs | 25 +-- .../src/storage/generator/double_map.rs | 6 +- frame/support/src/storage/generator/map.rs | 6 +- frame/support/src/storage/generator/nmap.rs | 4 +- frame/support/src/storage/migration.rs | 6 +- frame/support/src/storage/mod.rs | 18 +- .../support/src/storage/storage_noop_guard.rs | 2 +- frame/support/src/storage/transactional.rs | 4 +- .../src/traits/tokens/fungible/balanced.rs | 4 +- .../src/traits/tokens/fungibles/balanced.rs | 4 +- frame/support/src/traits/tokens/imbalance.rs | 2 +- .../tokens/imbalance/signed_imbalance.rs | 10 +- 
frame/support/src/traits/try_runtime.rs | 5 +- .../test/tests/construct_runtime_ui.rs | 2 +- frame/support/test/tests/decl_module_ui.rs | 2 +- frame/support/test/tests/decl_storage_ui.rs | 2 +- .../support/test/tests/derive_no_bound_ui.rs | 2 +- frame/support/test/tests/pallet.rs | 8 +- .../test/tests/pallet_compatibility.rs | 5 +- .../tests/pallet_compatibility_instance.rs | 5 +- frame/support/test/tests/pallet_ui.rs | 2 +- frame/support/test/tests/storage_alias_ui.rs | 2 +- .../src/extensions/check_non_zero_sender.rs | 2 +- frame/system/src/extensions/check_nonce.rs | 4 +- frame/system/src/extensions/check_weight.rs | 37 ++-- frame/system/src/lib.rs | 8 +- frame/system/src/offchain.rs | 2 +- frame/tips/src/benchmarking.rs | 6 +- frame/tips/src/lib.rs | 8 +- frame/tips/src/migrations/v4.rs | 6 +- .../asset-tx-payment/src/payment.rs | 2 +- frame/transaction-payment/src/lib.rs | 15 +- frame/transaction-payment/src/payment.rs | 2 +- frame/transaction-storage/src/lib.rs | 4 +- frame/uniques/src/impl_nonfungibles.rs | 2 +- frame/uniques/src/lib.rs | 6 +- frame/utility/src/lib.rs | 14 +- frame/vesting/src/lib.rs | 14 +- frame/vesting/src/tests.rs | 8 +- frame/vesting/src/vesting_info.rs | 4 +- .../api/proc-macro/src/decl_runtime_apis.rs | 12 +- .../api/proc-macro/src/impl_runtime_apis.rs | 6 +- .../proc-macro/src/mock_impl_runtime_apis.rs | 18 +- primitives/api/proc-macro/src/utils.rs | 17 +- primitives/api/test/tests/trybuild.rs | 2 +- primitives/arithmetic/fuzzer/src/biguint.rs | 2 +- .../arithmetic/fuzzer/src/fixed_point.rs | 4 +- .../src/multiply_by_rational_with_rounding.rs | 2 +- primitives/arithmetic/fuzzer/src/normalize.rs | 2 +- primitives/arithmetic/src/biguint.rs | 16 +- primitives/arithmetic/src/fixed_point.rs | 14 +- primitives/arithmetic/src/helpers_128bit.rs | 6 +- primitives/arithmetic/src/lib.rs | 6 +- primitives/arithmetic/src/per_things.rs | 18 +- primitives/arithmetic/src/rational.rs | 4 +- primitives/authorship/src/lib.rs | 2 +- 
primitives/blockchain/src/backend.rs | 12 +- primitives/blockchain/src/header_metadata.rs | 8 +- primitives/consensus/babe/src/digests.rs | 5 +- primitives/consensus/babe/src/lib.rs | 12 +- primitives/consensus/vrf/src/schnorrkel.rs | 45 ++-- .../core/hashing/proc-macro/src/impls.rs | 12 +- .../core/src/bounded/bounded_btree_map.rs | 2 +- .../core/src/bounded/bounded_btree_set.rs | 2 +- primitives/core/src/bounded/bounded_vec.rs | 18 +- primitives/core/src/crypto.rs | 22 +- primitives/core/src/ecdsa.rs | 4 +- primitives/core/src/ed25519.rs | 4 +- primitives/core/src/lib.rs | 9 +- primitives/core/src/offchain/storage.rs | 5 +- primitives/core/src/offchain/testing.rs | 15 +- primitives/core/src/sr25519.rs | 4 +- primitives/database/src/kvdb.rs | 2 +- primitives/debug-derive/src/impls.rs | 5 +- primitives/finality-grandpa/src/lib.rs | 8 +- primitives/inherents/src/lib.rs | 8 +- primitives/io/src/batch_verifier.rs | 8 +- primitives/keystore/src/lib.rs | 8 +- primitives/keystore/src/testing.rs | 2 +- primitives/maybe-compressed-blob/src/lib.rs | 2 +- .../npos-elections/fuzzer/src/common.rs | 10 +- .../fuzzer/src/phragmen_balancing.rs | 8 +- .../fuzzer/src/phragmms_balancing.rs | 8 +- primitives/npos-elections/src/balancing.rs | 10 +- primitives/npos-elections/src/lib.rs | 2 +- primitives/npos-elections/src/mock.rs | 10 +- primitives/npos-elections/src/node.rs | 4 +- primitives/npos-elections/src/phragmen.rs | 2 +- primitives/npos-elections/src/phragmms.rs | 2 +- primitives/npos-elections/src/pjr.rs | 6 +- primitives/npos-elections/src/reduce.rs | 24 +- .../runtime-interface/proc-macro/src/lib.rs | 2 +- .../proc-macro/src/pass_by/enum_.rs | 5 +- .../proc-macro/src/pass_by/inner.rs | 4 +- .../host_function_interface.rs | 5 +- .../src/runtime_interface/trait_decl_impl.rs | 2 +- .../runtime-interface/proc-macro/src/utils.rs | 14 +- primitives/runtime-interface/src/impls.rs | 2 +- primitives/runtime-interface/tests/ui.rs | 2 +- primitives/runtime/src/curve.rs | 6 +- 
primitives/runtime/src/generic/digest.rs | 30 +-- primitives/runtime/src/generic/era.rs | 4 +- primitives/runtime/src/generic/header.rs | 10 +- .../src/generic/unchecked_extrinsic.rs | 6 +- primitives/runtime/src/lib.rs | 24 +- primitives/runtime/src/offchain/http.rs | 8 +- .../runtime/src/offchain/storage_lock.rs | 8 +- primitives/runtime/src/traits.rs | 4 +- .../runtime/src/transaction_validity.rs | 25 +-- primitives/sandbox/src/embedded_executor.rs | 10 +- primitives/state-machine/src/basic.rs | 15 +- primitives/state-machine/src/ext.rs | 16 +- primitives/state-machine/src/lib.rs | 87 ++++---- .../src/overlayed_changes/changeset.rs | 6 +- primitives/state-machine/src/trie_backend.rs | 4 +- .../state-machine/src/trie_backend_essence.rs | 32 ++- primitives/storage/src/lib.rs | 5 +- primitives/timestamp/src/lib.rs | 4 +- .../transaction-storage-proof/src/lib.rs | 9 +- primitives/trie/src/cache/mod.rs | 4 +- primitives/trie/src/cache/shared_cache.rs | 21 +- primitives/trie/src/node_codec.rs | 41 ++-- primitives/trie/src/node_header.rs | 26 +-- primitives/trie/src/trie_codec.rs | 14 +- primitives/trie/src/trie_stream.rs | 20 +- .../proc-macro/src/decl_runtime_version.rs | 25 +-- primitives/version/src/lib.rs | 10 +- test-utils/client/src/lib.rs | 4 +- test-utils/derive/src/lib.rs | 2 +- test-utils/runtime/src/lib.rs | 31 ++- test-utils/runtime/src/system.rs | 19 +- .../runtime/transaction-pool/src/lib.rs | 31 +-- utils/build-script-utils/src/git.rs | 4 +- utils/fork-tree/src/lib.rs | 80 +++---- .../frame/benchmarking-cli/src/block/bench.rs | 2 +- .../benchmarking-cli/src/extrinsic/bench.rs | 4 +- .../benchmarking-cli/src/extrinsic/cmd.rs | 7 +- .../frame/benchmarking-cli/src/machine/mod.rs | 2 +- .../benchmarking-cli/src/overhead/template.rs | 2 +- .../benchmarking-cli/src/pallet/command.rs | 36 ++- .../benchmarking-cli/src/pallet/writer.rs | 10 +- .../benchmarking-cli/src/shared/stats.rs | 2 +- .../src/shared/weight_params.rs | 2 +- 
.../frame/benchmarking-cli/src/storage/cmd.rs | 2 +- .../benchmarking-cli/src/storage/write.rs | 6 +- .../frame-utilities-cli/src/pallet_id.rs | 2 +- utils/frame/generate-bags/src/lib.rs | 4 +- utils/frame/remote-externalities/src/lib.rs | 30 +-- .../rpc/state-trie-migration-rpc/src/lib.rs | 9 +- utils/frame/try-runtime/cli/src/lib.rs | 27 +-- utils/wasm-builder/src/builder.rs | 6 +- utils/wasm-builder/src/lib.rs | 5 +- utils/wasm-builder/src/prerequisites.rs | 7 +- utils/wasm-builder/src/wasm_project.rs | 32 ++- 500 files changed, 3501 insertions(+), 4096 deletions(-) diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index c4d944d7f2250..6d293b7b85fcc 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -39,9 +39,8 @@ impl SubstrateCli for Cli { Ok(match id { "dev" => Box::new(chain_spec::development_config()?), "" | "local" => Box::new(chain_spec::local_testnet_config()?), - path => { - Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?) - }, + path => + Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), }) } @@ -119,7 +118,7 @@ pub fn run() -> sc_cli::Result<()> { "Runtime benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." 
.into(), - ); + ) } cmd.run::(config) @@ -168,9 +167,8 @@ pub fn run() -> sc_cli::Result<()> { cmd.run(client, inherent_benchmark_data()?, Vec::new(), &ext_factory) }, - BenchmarkCmd::Machine(cmd) => { - cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()) - }, + BenchmarkCmd::Machine(cmd) => + cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), } }) }, diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index adc453b615f3e..ee8464688c79c 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -59,7 +59,7 @@ pub fn new_partial( ServiceError, > { if config.keystore_remote.is_some() { - return Err(ServiceError::Other("Remote Keystores are not supported.".into())); + return Err(ServiceError::Other("Remote Keystores are not supported.".into())) } let telemetry = config @@ -170,12 +170,11 @@ pub fn new_full(mut config: Configuration) -> Result if let Some(url) = &config.keystore_remote { match remote_keystore(url) { Ok(k) => keystore_container.set_remote_keystore(k), - Err(e) => { + Err(e) => return Err(ServiceError::Other(format!( "Error hooking up remote keystore for {}: {}", url, e - ))) - }, + ))), }; } let grandpa_protocol_name = sc_finality_grandpa::protocol_standard_name( diff --git a/bin/node/bench/src/core.rs b/bin/node/bench/src/core.rs index 72e9507e97be9..b6ad3ecd80068 100644 --- a/bin/node/bench/src/core.rs +++ b/bin/node/bench/src/core.rs @@ -74,19 +74,19 @@ impl fmt::Display for NsFormatter { let v = self.0; if v < 100 { - return write!(f, "{} ns", v); + return write!(f, "{} ns", v) } if self.0 < 100_000 { - return write!(f, "{:.1} µs", v as f64 / 1000.0); + return write!(f, "{:.1} µs", v as f64 / 1000.0) } if self.0 < 1_000_000 { - return write!(f, "{:.4} ms", v as f64 / 1_000_000.0); + return write!(f, "{:.4} ms", v as f64 / 1_000_000.0) } if self.0 < 100_000_000 { - return write!(f, "{:.1} ms", v as f64 / 1_000_000.0); + return write!(f, "{:.1} ms", v as f64 / 
1_000_000.0) } write!(f, "{:.4} s", v as f64 / 1_000_000_000.0) diff --git a/bin/node/bench/src/main.rs b/bin/node/bench/src/main.rs index a1c7e73d13e3f..8a5d99640eb1b 100644 --- a/bin/node/bench/src/main.rs +++ b/bin/node/bench/src/main.rs @@ -164,7 +164,7 @@ fn main() { println!("{}: {}", benchmark.name(), benchmark.path().full()) } } - return; + return } let mut results = Vec::new(); diff --git a/bin/node/bench/src/simple_trie.rs b/bin/node/bench/src/simple_trie.rs index 04f307821f33e..aa9c96a1cbd3f 100644 --- a/bin/node/bench/src/simple_trie.rs +++ b/bin/node/bench/src/simple_trie.rs @@ -45,7 +45,7 @@ impl<'a> HashDB for SimpleTrie<'a> { fn get(&self, key: &Hash, prefix: Prefix) -> Option { let key = sp_trie::prefixed_key::(key, prefix); if let Some(value) = self.overlay.get(&key) { - return value.clone(); + return value.clone() } self.db.get(0, &key).expect("Database backend error") } diff --git a/bin/node/bench/src/tempdb.rs b/bin/node/bench/src/tempdb.rs index 5fa052ab16563..eb3bb1d3fccd7 100644 --- a/bin/node/bench/src/tempdb.rs +++ b/bin/node/bench/src/tempdb.rs @@ -46,9 +46,8 @@ impl KeyValueDB for ParityDbWrapper { fn write(&self, transaction: DBTransaction) -> io::Result<()> { self.0 .commit(transaction.ops.iter().map(|op| match op { - kvdb::DBOp::Insert { col, key, value } => { - (*col as u8, &key[key.len() - 32..], Some(value.to_vec())) - }, + kvdb::DBOp::Insert { col, key, value } => + (*col as u8, &key[key.len() - 32..], Some(value.to_vec())), kvdb::DBOp::Delete { col, key } => (*col as u8, &key[key.len() - 32..], None), kvdb::DBOp::DeletePrefix { col: _, prefix: _ } => unimplemented!(), })) diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index ac3057df89e78..108d7743843b6 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -59,19 +59,17 @@ impl SubstrateCli for Cli { fn load_spec(&self, id: &str) -> std::result::Result, String> { let spec = match id { - "" => { + "" => return Err( "Please specify 
which chain you want to run, e.g. --dev or --chain=local" .into(), - ) - }, + ), "dev" => Box::new(chain_spec::development_config()), "local" => Box::new(chain_spec::local_testnet_config()), "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?), "staging" => Box::new(chain_spec::staging_testnet_config()), - path => { - Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?) - }, + path => + Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), }; Ok(spec) } @@ -111,7 +109,7 @@ pub fn run() -> Result<()> { "Runtime benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." .into(), - ); + ) } cmd.run::(config) @@ -168,9 +166,8 @@ pub fn run() -> Result<()> { &ext_factory, ) }, - BenchmarkCmd::Machine(cmd) => { - cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()) - }, + BenchmarkCmd::Machine(cmd) => + cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), } }) }, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 5a95bd4228fbe..6c29f0c08ee13 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -686,7 +686,7 @@ mod tests { sc_consensus_babe::authorship::claim_slot(slot.into(), &epoch, &keystore) .map(|(digest, _)| digest) { - break (babe_pre_digest, epoch_descriptor); + break (babe_pre_digest, epoch_descriptor) } slot += 1; diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index 7da4eea5191a3..358c09779d59a 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -47,7 +47,7 @@ pub fn wait_for(child: &mut Child, secs: u64) -> Result { let result = wait_timeout::ChildExt::wait_timeout(child, Duration::from_secs(secs - 5)) .map_err(|_| ())?; if let Some(exit_status) = result { - return Ok(exit_status); + return Ok(exit_status) } } eprintln!("Took too long to exit (> {} seconds). 
Killing...", secs); @@ -78,7 +78,7 @@ pub async fn wait_n_finalized_blocks_from(n: usize, url: &str) { if let Ok(block) = ChainApi::<(), Hash, Header, ()>::finalized_head(&rpc).await { built_blocks.insert(block); if built_blocks.len() > n { - break; + break } }; interval.tick().await; diff --git a/bin/node/cli/tests/telemetry.rs b/bin/node/cli/tests/telemetry.rs index fb9e0a9d38858..bef4e4ea03048 100644 --- a/bin/node/cli/tests/telemetry.rs +++ b/bin/node/cli/tests/telemetry.rs @@ -54,7 +54,7 @@ async fn telemetry_works() { let object = json.as_object().unwrap().get("payload").unwrap().as_object().unwrap(); if matches!(object.get("best"), Some(serde_json::Value::String(_))) { - break; + break } }, diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index c86dbe2d15e3b..528dce14f46a5 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -210,12 +210,12 @@ impl FromStr for BlockAddress { fn from_str(s: &str) -> Result { // try to parse hash first if let Ok(hash) = s.parse() { - return Ok(Self::Hash(hash)); + return Ok(Self::Hash(hash)) } // then number if let Ok(number) = s.parse() { - return Ok(Self::Number(number)); + return Ok(Self::Number(number)) } // then assume it's bytes (hex-encoded) @@ -243,7 +243,7 @@ impl FromStr for ExtrinsicAddres fn from_str(s: &str) -> Result { // first try raw bytes if let Ok(bytes) = sp_core::bytes::from_hex(s).map(Self::Bytes) { - return Ok(bytes); + return Ok(bytes) } // split by a bunch of different characters diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 64089e672263d..b3f58ea5d24ab 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -256,7 +256,7 @@ mod multiplier_tests { let next = runtime_multiplier_update(fm); fm = next; if fm == min_multiplier() { - break; + break } iterations += 1; } @@ -284,8 +284,8 @@ mod multiplier_tests { // `cargo test congested_chain_simulation -- --nocapture` to get some insight. 
// almost full. The entire quota of normal transactions is taken. - let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() - - Weight::from_ref_time(100); + let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() - + Weight::from_ref_time(100); // Default substrate weight. let tx_weight = frame_support::weights::constants::ExtrinsicBaseWeight::get(); diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 4c24828644fad..e63b0f2f38d28 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -296,21 +296,21 @@ impl InstanceFilter for ProxyType { ProxyType::Any => true, ProxyType::NonTransfer => !matches!( c, - RuntimeCall::Balances(..) - | RuntimeCall::Assets(..) - | RuntimeCall::Uniques(..) - | RuntimeCall::Nfts(..) - | RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { .. }) - | RuntimeCall::Indices(pallet_indices::Call::transfer { .. }) + RuntimeCall::Balances(..) | + RuntimeCall::Assets(..) | + RuntimeCall::Uniques(..) | + RuntimeCall::Nfts(..) | + RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { .. }) | + RuntimeCall::Indices(pallet_indices::Call::transfer { .. }) ), ProxyType::Governance => matches!( c, - RuntimeCall::Democracy(..) - | RuntimeCall::Council(..) - | RuntimeCall::Society(..) - | RuntimeCall::TechnicalCommittee(..) - | RuntimeCall::Elections(..) - | RuntimeCall::Treasury(..) + RuntimeCall::Democracy(..) | + RuntimeCall::Council(..) | + RuntimeCall::Society(..) | + RuntimeCall::TechnicalCommittee(..) | + RuntimeCall::Elections(..) | + RuntimeCall::Treasury(..) 
), ProxyType::Staking => matches!(c, RuntimeCall::Staking(..)), } @@ -670,8 +670,8 @@ impl Get> for OffchainRandomBalancing { max => { let seed = sp_io::offchain::random_seed(); let random = ::decode(&mut TrailingZeroInput::new(&seed)) - .expect("input is padded with zeroes; qed") - % max.saturating_add(1); + .expect("input is padded with zeroes; qed") % + max.saturating_add(1); random as usize }, }; diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 4ce6fa518222b..59f1fa94c9b20 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -95,7 +95,7 @@ pub fn drop_system_cache() { target: "bench-logistics", "Clearing system cache on windows is not supported. Benchmark might totally be wrong.", ); - return; + return } std::process::Command::new("sync") @@ -291,7 +291,7 @@ impl<'a> Iterator for BlockContentIterator<'a> { fn next(&mut self) -> Option { if self.content.size.map(|size| size <= self.iteration).unwrap_or(false) { - return None; + return None } let sender = self.keyring.at(self.iteration); @@ -307,24 +307,22 @@ impl<'a> Iterator for BlockContentIterator<'a> { signed_extra(0, kitchensink_runtime::ExistentialDeposit::get() + 1), )), function: match self.content.block_type { - BlockType::RandomTransfersKeepAlive => { + BlockType::RandomTransfersKeepAlive => RuntimeCall::Balances(BalancesCall::transfer_keep_alive { dest: sp_runtime::MultiAddress::Id(receiver), value: kitchensink_runtime::ExistentialDeposit::get() + 1, - }) - }, + }), BlockType::RandomTransfersReaping => { RuntimeCall::Balances(BalancesCall::transfer { dest: sp_runtime::MultiAddress::Id(receiver), // Transfer so that ending balance would be 1 less than existential // deposit so that we kill the sender account. 
- value: 100 * DOLLARS - - (kitchensink_runtime::ExistentialDeposit::get() - 1), + value: 100 * DOLLARS - + (kitchensink_runtime::ExistentialDeposit::get() - 1), }) }, - BlockType::Noop => { - RuntimeCall::System(SystemCall::remark { remark: Vec::new() }) - }, + BlockType::Noop => + RuntimeCall::System(SystemCall::remark { remark: Vec::new() }), }, }, self.runtime_version.spec_version, diff --git a/client/allocator/src/freeing_bump.rs b/client/allocator/src/freeing_bump.rs index c1bc1bc9eb6f7..e81d1b79e74ed 100644 --- a/client/allocator/src/freeing_bump.rs +++ b/client/allocator/src/freeing_bump.rs @@ -141,7 +141,7 @@ impl Order { fn from_size(size: u32) -> Result { let clamped_size = if size > MAX_POSSIBLE_ALLOCATION { log::warn!(target: LOG_TARGET, "going to fail due to allocating {:?}", size); - return Err(Error::RequestedAllocationTooLarge); + return Err(Error::RequestedAllocationTooLarge) } else if size < MIN_POSSIBLE_ALLOCATION { MIN_POSSIBLE_ALLOCATION } else { @@ -401,7 +401,7 @@ impl FreeingBumpHeapAllocator { size: WordSize, ) -> Result, Error> { if self.poisoned { - return Err(error("the allocator has been poisoned")); + return Err(error("the allocator has been poisoned")) } let bomb = PoisonBomb { poisoned: &mut self.poisoned }; @@ -463,7 +463,7 @@ impl FreeingBumpHeapAllocator { ptr: Pointer, ) -> Result<(), Error> { if self.poisoned { - return Err(error("the allocator has been poisoned")); + return Err(error("the allocator has been poisoned")) } let bomb = PoisonBomb { poisoned: &mut self.poisoned }; @@ -511,7 +511,7 @@ impl FreeingBumpHeapAllocator { bumper, heap_end ); - return Err(Error::AllocatorOutOfSpace); + return Err(Error::AllocatorOutOfSpace) } let res = *bumper; @@ -524,7 +524,7 @@ impl FreeingBumpHeapAllocator { mem: &mut M, ) -> Result<(), Error> { if mem.size() < *last_observed_memory_size { - return Err(Error::MemoryShrinked); + return Err(Error::MemoryShrinked) } *last_observed_memory_size = mem.size(); Ok(()) diff --git 
a/client/api/src/backend.rs b/client/api/src/backend.rs index 226717b54f23c..79cc0d7a16bcc 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -346,7 +346,7 @@ where // this terminates the iterator the first time it fails. if let Some(prefix) = self.prefix { if !next_key.starts_with(&prefix.0[..]) { - return None; + return None } } self.current_key = next_key.clone(); diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index 971cef2dadbdf..07a483bc3eaf2 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -176,8 +176,8 @@ impl ExecutionExtensions { } } - if capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_READ) - || capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_WRITE) + if capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_READ) || + capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_WRITE) { if let Some(offchain_db) = self.offchain_db.as_ref() { extensions.register(OffchainDbExt::new(offchain::LimitedExternalities::new( @@ -210,9 +210,8 @@ impl ExecutionExtensions { ExecutionContext::BlockConstruction => self.strategies.block_construction.get_manager(), ExecutionContext::Syncing => self.strategies.syncing.get_manager(), ExecutionContext::Importing => self.strategies.importing.get_manager(), - ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.is_all() => { - self.strategies.offchain_worker.get_manager() - }, + ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.is_all() => + self.strategies.offchain_worker.get_manager(), ExecutionContext::OffchainCall(_) => self.strategies.other.get_manager(), }; @@ -232,7 +231,7 @@ impl offchain::TransactionPool for TransactionPoolAdapter< Ok(xt) => xt, Err(e) => { log::warn!("Unable to decode extrinsic: {:?}: {}", data, e); - return Err(()); + return Err(()) }, }; diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 
93adb1b07f28b..5a3e25ab5987b 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -198,7 +198,7 @@ impl Blockchain { pub fn equals_to(&self, other: &Self) -> bool { // Check ptr equality first to avoid double read locks. if ptr::eq(self, other) { - return true; + return true } self.canon_equals_to(other) && self.storage.read().blocks == other.storage.read().blocks } @@ -207,14 +207,14 @@ impl Blockchain { pub fn canon_equals_to(&self, other: &Self) -> bool { // Check ptr equality first to avoid double read locks. if ptr::eq(self, other) { - return true; + return true } let this = self.storage.read(); let other = other.storage.read(); - this.hashes == other.hashes - && this.best_hash == other.best_hash - && this.best_number == other.best_number - && this.genesis_hash == other.genesis_hash + this.hashes == other.hashes && + this.best_hash == other.best_hash && + this.best_number == other.best_number && + this.genesis_hash == other.genesis_hash } /// Insert header CHT root. @@ -313,7 +313,7 @@ impl Blockchain { if !stored_justifications.append(justification) { return Err(sp_blockchain::Error::BadJustification( "Duplicate consensus engine ID".into(), - )); + )) } } else { *block_justifications = Some(Justifications::from(justification)); @@ -761,7 +761,7 @@ where fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result { if hash == Default::default() { - return Ok(Self::State::default()); + return Ok(Self::State::default()) } self.states @@ -797,7 +797,7 @@ impl backend::LocalBackend for Backend where Block: /// Check that genesis storage is valid. 
pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { - return Err(sp_blockchain::Error::InvalidState); + return Err(sp_blockchain::Error::InvalidState) } if storage @@ -805,7 +805,7 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { .keys() .any(|child_key| !well_known_keys::is_child_storage_key(child_key)) { - return Err(sp_blockchain::Error::InvalidState); + return Err(sp_blockchain::Error::InvalidState) } Ok(()) diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index 3766e19c51add..cdcb80a110b74 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -135,7 +135,7 @@ where let number = Reverse(number); if !self.remove_leaf(&number, &hash) { - return None; + return None } let inserted = parent_hash.and_then(|parent_hash| { @@ -160,7 +160,7 @@ where /// will be pruned soon afterwards anyway. pub fn finalize_height(&mut self, number: N) -> FinalizationOutcome { let boundary = if number == N::zero() { - return FinalizationOutcome { removed: BTreeMap::new() }; + return FinalizationOutcome { removed: BTreeMap::new() } } else { number - N::one() }; @@ -176,7 +176,7 @@ where /// Returns the leaves that would be displaced by finalizing the given block. 
pub fn displaced_by_finalize_height(&self, number: N) -> FinalizationOutcome { let boundary = if number == N::zero() { - return FinalizationOutcome { removed: BTreeMap::new() }; + return FinalizationOutcome { removed: BTreeMap::new() } } else { number - N::one() }; diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 0ab9738153569..3d21f12f6940b 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -66,7 +66,7 @@ pub mod utils { { move |base, hash| { if base == hash { - return Ok(false); + return Ok(false) } let current = current.as_ref().map(|(c, p)| (c.borrow(), p.borrow())); @@ -74,11 +74,11 @@ pub mod utils { let mut hash = hash; if let Some((current_hash, current_parent_hash)) = current { if base == current_hash { - return Ok(false); + return Ok(false) } if hash == current_hash { if base == current_parent_hash { - return Ok(true); + return Ok(true) } else { hash = current_parent_hash; } diff --git a/client/api/src/notifications/registry.rs b/client/api/src/notifications/registry.rs index 721bb67198ad4..882d6ed40be67 100644 --- a/client/api/src/notifications/registry.rs +++ b/client/api/src/notifications/registry.rs @@ -183,7 +183,7 @@ impl Registry { // early exit if no listeners if !has_wildcard && self.listeners.is_empty() && self.child_listeners.is_empty() { - return; + return } let mut subscribers = self.wildcard_listeners.clone(); @@ -229,7 +229,7 @@ impl Registry { // Don't send empty notifications if changes.is_empty() && child_changes.is_empty() { - return; + return } let changes = Arc::<[_]>::from(changes); @@ -299,7 +299,7 @@ impl Registry { None => { wildcards.remove(&subscriber); }, - Some(filters) => { + Some(filters) => for key in filters.iter() { let remove_key = match listeners.get_mut(key) { Some(ref mut set) => { @@ -312,8 +312,7 @@ impl Registry { if remove_key { listeners.remove(key); } - } - }, + }, } } diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 
92912081e6347..4121b64e00b9b 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -307,7 +307,7 @@ where .into_iter() .filter(move |a| { if publish_non_global_ips { - return true; + return true } a.iter().all(|p| match p { @@ -343,7 +343,7 @@ where ).await?.into_iter().map(Into::into).collect::>(); if only_if_changed && keys == self.latest_published_keys { - return Ok(()); + return Ok(()) } let addresses = serialize_addresses(self.addresses_to_publish()); @@ -515,7 +515,7 @@ where .map_err(Error::EncodingDecodingScale)?; if !AuthorityPair::verify(&auth_signature, &record, &authority_id) { - return Err(Error::VerifyingDhtPayload); + return Err(Error::VerifyingDhtPayload) } let addresses: Vec = schema::AuthorityRecord::decode(record.as_slice()) @@ -554,10 +554,10 @@ where let signature = Signature { public_key, bytes: peer_signature.signature }; if !signature.verify(record, &remote_peer_id) { - return Err(Error::VerifyingDhtPayload); + return Err(Error::VerifyingDhtPayload) } } else if self.strict_record_validation { - return Err(Error::MissingPeerIdSignature); + return Err(Error::MissingPeerIdSignature) } else { debug!( target: LOG_TARGET, diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index f6688504b1026..19bbbf0b62e7e 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -59,7 +59,7 @@ impl AddrCache { addresses, ); - return; + return } else if peer_ids.len() > 1 { log::warn!( target: super::LOG_TARGET, @@ -144,7 +144,7 @@ impl AddrCache { { addresses } else { - continue; + continue }; self.remove_authority_id_from_peer_ids( diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 45f2331a24107..b69294bf6ccb0 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ 
b/client/basic-authorship/src/basic_authorship.rs @@ -367,7 +367,7 @@ where error!( "❌️ Mandatory inherent extrinsic returned error. Block cannot be produced." ); - return Err(ApplyExtrinsicFailed(Validity(e))); + return Err(ApplyExtrinsicFailed(Validity(e))) }, Err(e) => { warn!("❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e); @@ -413,7 +413,7 @@ where let pending_tx = if let Some(pending_tx) = pending_iterator.next() { pending_tx } else { - break EndProposingReason::NoMoreTransactions; + break EndProposingReason::NoMoreTransactions }; let now = (self.now)(); @@ -422,7 +422,7 @@ where "Consensus deadline reached when pushing block transactions, \ proceeding with proposing." ); - break EndProposingReason::HitDeadline; + break EndProposingReason::HitDeadline } let pending_tx_data = pending_tx.data().clone(); @@ -439,17 +439,17 @@ where but will try {} more transactions before quitting.", MAX_SKIPPED_TRANSACTIONS - skipped, ); - continue; + continue } else if now < soft_deadline { debug!( "Transaction would overflow the block size limit, \ but we still have time before the soft deadline, so \ we will try a bit more." 
); - continue; + continue } else { debug!("Reached block size limit, proceeding with proposing."); - break EndProposingReason::HitBlockSizeLimit; + break EndProposingReason::HitBlockSizeLimit } } @@ -474,7 +474,7 @@ where ); } else { debug!("Reached block weight limit, proceeding with proposing."); - break EndProposingReason::HitBlockWeightLimit; + break EndProposingReason::HitBlockWeightLimit } }, Err(e) if skipped > 0 => { @@ -633,7 +633,7 @@ mod tests { let mut value = cell.lock(); if !value.0 { value.0 = true; - return value.1; + return value.1 } let old = value.1; let new = old + time::Duration::from_secs(1); @@ -677,7 +677,7 @@ mod tests { let mut value = cell.lock(); if !value.0 { value.0 = true; - return value.1; + return value.1 } let new = value.1 + time::Duration::from_secs(160); *value = (true, new); @@ -868,13 +868,13 @@ mod tests { .chain((0..extrinsics_num - 1).map(|v| Extrinsic::IncludeData(vec![v as u8; 10]))) .collect::>(); - let block_limit = genesis_header.encoded_size() - + extrinsics + let block_limit = genesis_header.encoded_size() + + extrinsics .iter() .take(extrinsics_num - 1) .map(Encode::encoded_size) - .sum::() - + Vec::::new().encoded_size(); + .sum::() + + Vec::::new().encoded_size(); block_on(txpool.submit_at(&BlockId::number(0), SOURCE, extrinsics)).unwrap(); diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 55767f8c1b3f0..d29ed433c38db 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -234,7 +234,7 @@ mod tests { if response.result != not_ready { assert_eq!(response.result, expected); // Success - return; + return } std::thread::sleep(std::time::Duration::from_millis(50)) } diff --git a/client/beefy/src/communication/gossip.rs b/client/beefy/src/communication/gossip.rs index 627b11257fa7a..bbc35ac8e526e 100644 --- a/client/beefy/src/communication/gossip.rs +++ b/client/beefy/src/communication/gossip.rs @@ -160,18 +160,18 @@ where let known_votes = self.known_votes.read(); if 
!known_votes.is_live(&round) { - return ValidationResult::Discard; + return ValidationResult::Discard } if known_votes.is_known(&round, &msg_hash) { - return ValidationResult::ProcessAndKeep(self.topic); + return ValidationResult::ProcessAndKeep(self.topic) } } if BeefyKeystore::verify(&msg.id, &msg.signature, &msg.commitment.encode()) { self.known_votes.write().add_known(&round, msg_hash); self.known_peers.lock().note_vote_for(*sender, round); - return ValidationResult::ProcessAndKeep(self.topic); + return ValidationResult::ProcessAndKeep(self.topic) } else { // TODO: report peer debug!(target: "beefy", "🥩 Bad signature on message: {:?}, from: {:?}", msg, sender); @@ -215,7 +215,7 @@ where let known_votes = self.known_votes.read(); Box::new(move |_who, intent, _topic, mut data| { if let MessageIntent::PeriodicRebroadcast = intent { - return do_rebroadcast; + return do_rebroadcast } let msg = match VoteMessage::, Public, Signature>::decode(&mut data) { diff --git a/client/beefy/src/communication/request_response/incoming_requests_handler.rs b/client/beefy/src/communication/request_response/incoming_requests_handler.rs index d3ff1379ba656..9f02b7162b54c 100644 --- a/client/beefy/src/communication/request_response/incoming_requests_handler.rs +++ b/client/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -76,9 +76,9 @@ impl IncomingRequest { sent_feedback: None, }; if let Err(_) = pending_response.send(response) { - return Err(Error::DecodingErrorNoReputationChange(peer, err)); + return Err(Error::DecodingErrorNoReputationChange(peer, err)) } - return Err(Error::DecodingError(peer, err)); + return Err(Error::DecodingError(peer, err)) }, }; Ok(Self::new(peer, payload, pending_response)) diff --git a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs index 3670d8f1da2a1..c4d3c926190e6 100644 --- 
a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs +++ b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -94,7 +94,7 @@ where let live = self.live_peers.lock(); while let Some(peer) = self.peers_cache.pop_front() { if live.contains(&peer) { - return Some(peer); + return Some(peer) } } None @@ -122,7 +122,7 @@ where pub fn request(&mut self, block: NumberFor) { // ignore new requests while there's already one pending if matches!(self.state, State::AwaitingResponse(_, _, _)) { - return; + return } self.reset_peers_cache_for_block(block); @@ -193,7 +193,7 @@ where State::Idle => { futures::pending!(); // Doesn't happen as 'futures::pending!()' is an 'await' barrier that never passes. - return None; + return None }, State::AwaitingResponse(peer, block, receiver) => { let resp = receiver.await; diff --git a/client/beefy/src/justification.rs b/client/beefy/src/justification.rs index 5ddd75f3bee1b..7243c692727f0 100644 --- a/client/beefy/src/justification.rs +++ b/client/beefy/src/justification.rs @@ -48,11 +48,11 @@ fn verify_with_validator_set( ) -> Result<(), ConsensusError> { match proof { VersionedFinalityProof::V1(signed_commitment) => { - if signed_commitment.signatures.len() != validator_set.len() - || signed_commitment.commitment.validator_set_id != validator_set.id() - || signed_commitment.commitment.block_number != target_number + if signed_commitment.signatures.len() != validator_set.len() || + signed_commitment.commitment.validator_set_id != validator_set.id() || + signed_commitment.commitment.block_number != target_number { - return Err(ConsensusError::InvalidJustification); + return Err(ConsensusError::InvalidJustification) } // Arrangement of signatures in the commitment should be in the same order diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs index 0b375b2e6a896..7a8cc4171a155 100644 --- a/client/beefy/src/round.rs +++ b/client/beefy/src/round.rs @@ -38,7 +38,7 @@ struct 
RoundTracker { impl RoundTracker { fn add_vote(&mut self, vote: (Public, Signature), self_vote: bool) -> bool { if self.votes.contains_key(&vote.0) { - return false; + return false } self.self_vote = self.self_vote || self_vote; @@ -106,8 +106,8 @@ where } pub(crate) fn should_self_vote(&self, round: &(P, NumberFor)) -> bool { - Some(round.1) > self.best_done - && self.rounds.get(round).map(|tracker| !tracker.has_self_vote()).unwrap_or(true) + Some(round.1) > self.best_done && + self.rounds.get(round).map(|tracker| !tracker.has_self_vote()).unwrap_or(true) } pub(crate) fn add_vote( diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 05bc11b4e2072..6726fa4375387 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -222,7 +222,7 @@ impl VoterOracle { r } else { debug!(target: "beefy", "🥩 No voting round started"); - return None; + return None }; // `target` is guaranteed > `best_beefy` since `min_block_delta` is at least `1`. @@ -709,7 +709,7 @@ where hash } else { warn!(target: "beefy", "🥩 No MMR root digest found for: {:?}", target_hash); - return Ok(()); + return Ok(()) }; let rounds = self @@ -719,7 +719,7 @@ where .ok_or(Error::UninitSession)?; if !rounds.should_self_vote(&(payload.clone(), target_number)) { debug!(target: "beefy", "🥩 Don't double vote for block number: {:?}", target_number); - return Ok(()); + return Ok(()) } let (validators, validator_set_id) = (rounds.validators(), rounds.validator_set_id()); @@ -728,7 +728,7 @@ where id } else { debug!(target: "beefy", "🥩 Missing validator id - can't vote for: {:?}", target_hash); - return Ok(()); + return Ok(()) }; let commitment = Commitment { payload, block_number: target_number, validator_set_id }; @@ -738,7 +738,7 @@ where Ok(sig) => sig, Err(err) => { warn!(target: "beefy", "🥩 Error signing commitment: {:?}", err); - return Ok(()); + return Ok(()) }, }; diff --git a/client/chain-spec/derive/src/impls.rs b/client/chain-spec/derive/src/impls.rs index 
083e87f092961..7af403d46ad10 100644 --- a/client/chain-spec/derive/src/impls.rs +++ b/client/chain-spec/derive/src/impls.rs @@ -97,7 +97,7 @@ pub fn group_derive(ast: &DeriveInput) -> proc_macro::TokenStream { Error::new(Span::call_site(), &format!("Could not find `serde` crate: {}", e)) .to_compile_error(); - return quote!( #err ); + return quote!( #err ) }, }; @@ -176,7 +176,7 @@ pub fn derive( Ok(FoundCrate::Name(chain_spec_name)) => chain_spec_name, Err(e) => { let err = Error::new(Span::call_site(), &e).to_compile_error(); - return quote!( #err ).into(); + return quote!( #err ).into() }, }; let crate_name = Ident::new(&crate_name, Span::call_site()); diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index 74bf1cb198154..20f68bc7fb55e 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -80,21 +80,16 @@ pub fn execution_method_from_cli( WasmExecutionMethod::Interpreted => sc_service::config::WasmExecutionMethod::Interpreted, WasmExecutionMethod::Compiled => sc_service::config::WasmExecutionMethod::Compiled { instantiation_strategy: match _instantiation_strategy { - WasmtimeInstantiationStrategy::PoolingCopyOnWrite => { - sc_service::config::WasmtimeInstantiationStrategy::PoolingCopyOnWrite - }, - WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite => { - sc_service::config::WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite - }, - WasmtimeInstantiationStrategy::Pooling => { - sc_service::config::WasmtimeInstantiationStrategy::Pooling - }, - WasmtimeInstantiationStrategy::RecreateInstance => { - sc_service::config::WasmtimeInstantiationStrategy::RecreateInstance - }, - WasmtimeInstantiationStrategy::LegacyInstanceReuse => { - sc_service::config::WasmtimeInstantiationStrategy::LegacyInstanceReuse - }, + WasmtimeInstantiationStrategy::PoolingCopyOnWrite => + sc_service::config::WasmtimeInstantiationStrategy::PoolingCopyOnWrite, + WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite => + 
sc_service::config::WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite, + WasmtimeInstantiationStrategy::Pooling => + sc_service::config::WasmtimeInstantiationStrategy::Pooling, + WasmtimeInstantiationStrategy::RecreateInstance => + sc_service::config::WasmtimeInstantiationStrategy::RecreateInstance, + WasmtimeInstantiationStrategy::LegacyInstanceReuse => + sc_service::config::WasmtimeInstantiationStrategy::LegacyInstanceReuse, }, }, } @@ -259,12 +254,10 @@ impl Into for SyncMode { fn into(self) -> sc_network::config::SyncMode { match self { SyncMode::Full => sc_network::config::SyncMode::Full, - SyncMode::Fast => { - sc_network::config::SyncMode::Fast { skip_proofs: false, storage_chain_mode: false } - }, - SyncMode::FastUnsafe => { - sc_network::config::SyncMode::Fast { skip_proofs: true, storage_chain_mode: false } - }, + SyncMode::Fast => + sc_network::config::SyncMode::Fast { skip_proofs: false, storage_chain_mode: false }, + SyncMode::FastUnsafe => + sc_network::config::SyncMode::Fast { skip_proofs: true, storage_chain_mode: false }, SyncMode::Warp => sc_network::config::SyncMode::Warp, } } diff --git a/client/cli/src/commands/purge_chain_cmd.rs b/client/cli/src/commands/purge_chain_cmd.rs index b0bf81ee5d3b7..9a3aeee50e944 100644 --- a/client/cli/src/commands/purge_chain_cmd.rs +++ b/client/cli/src/commands/purge_chain_cmd.rs @@ -64,7 +64,7 @@ impl PurgeChainCmd { Some('y') | Some('Y') => {}, _ => { println!("Aborted"); - return Ok(()); + return Ok(()) }, } } diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 093779f66dc0c..35181d83f805f 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -499,19 +499,19 @@ impl CliConfiguration for RunCmd { pub fn is_node_name_valid(_name: &str) -> std::result::Result<(), &str> { let name = _name.to_string(); if name.chars().count() >= crate::NODE_NAME_MAX_LENGTH { - return Err("Node name too long"); + return Err("Node name too long") } 
let invalid_chars = r"[\\.@]"; let re = Regex::new(invalid_chars).unwrap(); if re.is_match(&name) { - return Err("Node name should not contain invalid chars such as '.' and '@'"); + return Err("Node name should not contain invalid chars such as '.' and '@'") } let invalid_patterns = r"(https?:\\/+)?(www)+"; let re = Regex::new(invalid_patterns).unwrap(); if re.is_match(&name) { - return Err("Node name should not contain urls"); + return Err("Node name should not contain urls") } Ok(()) @@ -529,7 +529,7 @@ fn rpc_interface( a validator. Use `--unsafe-rpc-external` or `--rpc-methods=unsafe` if you understand \ the risks. See the options description for more information." .to_owned(), - )); + )) } if is_external || is_unsafe_external { @@ -604,7 +604,7 @@ fn parse_cors(s: &str) -> Result { match part { "all" | "*" => { is_all = true; - break; + break }, other => origins.push(other.to_owned()), } diff --git a/client/cli/src/commands/vanity.rs b/client/cli/src/commands/vanity.rs index ea3a89eda18b1..ae0007ac7964d 100644 --- a/client/cli/src/commands/vanity.rs +++ b/client/cli/src/commands/vanity.rs @@ -103,7 +103,7 @@ where best = score; if best >= top { println!("best: {} == top: {}", best, top); - return Ok(utils::format_seed::(seed.clone())); + return Ok(utils::format_seed::(seed.clone())) } } done += 1; @@ -131,7 +131,7 @@ fn next_seed(seed: &mut [u8]) { }, _ => { *s += 1; - break; + break }, } } @@ -144,7 +144,7 @@ fn calculate_score(_desired: &str, key: &str) -> usize { let snip_size = _desired.len() - truncate; let truncated = &_desired[0..snip_size]; if let Some(pos) = key.find(truncated) { - return (47 - pos) + (snip_size * 48); + return (47 - pos) + (snip_size * 48) } } 0 diff --git a/client/cli/src/commands/verify.rs b/client/cli/src/commands/verify.rs index 8f23eb03fc62c..82554fbf268fa 100644 --- a/client/cli/src/commands/verify.rs +++ b/client/cli/src/commands/verify.rs @@ -81,7 +81,7 @@ where if Pair::verify(&signature, &message, &pubkey) { 
println!("Signature verifies correctly."); } else { - return Err(error::Error::SignatureInvalid); + return Err(error::Error::SignatureInvalid) } Ok(()) diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index c3249b09ec16e..77689708a231f 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -672,7 +672,7 @@ pub fn generate_node_name() -> String { let count = node_name.chars().count(); if count < NODE_NAME_MAX_LENGTH { - return node_name; + return node_name } } } diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index d65193fe56ed7..5580dea45bde6 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -197,16 +197,15 @@ impl NetworkParams { // Activate if the user explicitly requested local discovery, `--dev` is given or the // chain type is `Local`/`Development` let allow_non_globals_in_dht = - self.discover_local - || is_dev || matches!(chain_type, ChainType::Local | ChainType::Development); + self.discover_local || + is_dev || matches!(chain_type, ChainType::Local | ChainType::Development); let allow_private_ipv4 = match (self.allow_private_ipv4, self.no_private_ipv4) { (true, true) => unreachable!("`*_private_ipv4` flags are mutually exclusive; qed"), (true, false) => true, (false, true) => false, - (false, false) => { - is_dev || matches!(chain_type, ChainType::Local | ChainType::Development) - }, + (false, false) => + is_dev || matches!(chain_type, ChainType::Local | ChainType::Development), }; NetworkConfiguration { diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs index 75692f7e2a908..2346455c26a37 100644 --- a/client/cli/src/params/node_key_params.rs +++ b/client/cli/src/params/node_key_params.rs @@ -142,9 +142,7 @@ mod tests { params.node_key(net_config_dir).and_then(|c| match c { NodeKeyConfig::Ed25519(sc_network::config::Secret::Input(ref ski)) if node_key_type == NodeKeyType::Ed25519 && 
&sk[..] == ski.as_ref() => - { - Ok(()) - }, + Ok(()), _ => Err(error::Error::Input("Unexpected node key config".into())), }) }) @@ -204,9 +202,7 @@ mod tests { params.node_key(net_config_dir).and_then(move |c| match c { NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) if typ == NodeKeyType::Ed25519 && f == &dir.join(NODE_KEY_ED25519_FILE) => - { - Ok(()) - }, + Ok(()), _ => Err(error::Error::Input("Unexpected node key config".into())), }) }) diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index 2fb7ea669ace1..5cbb6dbad54a3 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -100,13 +100,12 @@ impl SharedParams { pub fn chain_id(&self, is_dev: bool) -> String { match self.chain { Some(ref chain) => chain.clone(), - None => { + None => if is_dev { "dev".into() } else { "".into() - } - }, + }, } } diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index 76786074d6f15..b17feae45897e 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -231,9 +231,8 @@ where // skip the inherents verification if the runtime API is old or not expected to // exist. - if !block.state_action.skip_execution_checks() - && self - .client + if !block.state_action.skip_execution_checks() && + self.client .runtime_api() .has_api_with::, _>( &BlockId::Hash(parent_hash), diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index d2fa20ed3543d..50a02726cf56a 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -126,7 +126,7 @@ where /// Get slot author for given block along with authorities. fn slot_author(slot: Slot, authorities: &[AuthorityId

]) -> Option<&AuthorityId

> { if authorities.is_empty() { - return None; + return None } let idx = *slot % (authorities.len() as u64); @@ -490,7 +490,7 @@ where self.client.info().finalized_number, slot, self.logging_target(), - ); + ) } } false @@ -575,7 +575,7 @@ impl From> for String { /// Get pre-digests from the header pub fn find_pre_digest(header: &B::Header) -> Result> { if header.number().is_zero() { - return Ok(0.into()); + return Ok(0.into()) } let mut pre_digest: Option = None; @@ -607,7 +607,7 @@ where match compatibility_mode { CompatibilityMode::None => {}, // Use `initialize_block` until we hit the block that should disable the mode. - CompatibilityMode::UseInitializeBlock { until } => { + CompatibilityMode::UseInitializeBlock { until } => if *until > context_block_number { runtime_api .initialize_block( @@ -621,8 +621,7 @@ where ), ) .map_err(|_| sp_consensus::Error::InvalidAuthoritiesSet)?; - } - }, + }, } runtime_api diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index f9617807db626..b39153faa6d1a 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -46,13 +46,13 @@ pub(super) fn calculate_primary_threshold( // configuration parameters, this is not sufficient to guarantee that `c.1` is non-zero // (i.e. third party implementations are possible). 
if c.1 == 0 || authority_index >= authorities.len() { - return 0; + return 0 } let c = c.0 as f64 / c.1 as f64; - let theta = authorities[authority_index].1 as f64 - / authorities.iter().map(|(_, weight)| weight).sum::() as f64; + let theta = authorities[authority_index].1 as f64 / + authorities.iter().map(|(_, weight)| weight).sum::() as f64; assert!(theta > 0.0, "authority with weight 0."); @@ -109,7 +109,7 @@ pub(super) fn secondary_slot_author( randomness: [u8; 32], ) -> Option<&AuthorityId> { if authorities.is_empty() { - return None; + return None } let rand = U256::from((randomness, slot).using_encoded(blake2_256)); @@ -138,7 +138,7 @@ fn claim_secondary_slot( let Epoch { authorities, randomness, epoch_index, .. } = epoch; if authorities.is_empty() { - return None; + return None } let expected_author = secondary_slot_author(slot, authorities, *randomness)?; @@ -176,7 +176,7 @@ fn claim_secondary_slot( }; if let Some(pre_digest) = pre_digest { - return Some((pre_digest, authority_id.clone())); + return Some((pre_digest, authority_id.clone())) } } } @@ -211,8 +211,8 @@ pub fn claim_slot_using_keys( keys: &[(AuthorityId, usize)], ) -> Option<(PreDigest, AuthorityId)> { claim_primary_slot(slot, epoch, epoch.config.c, keystore, keys).or_else(|| { - if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() - || epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() + if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() || + epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() { claim_secondary_slot( slot, @@ -265,7 +265,7 @@ fn claim_primary_slot( authority_index: *authority_index as u32, }); - return Some((pre_digest, authority_id.clone())); + return Some((pre_digest, authority_id.clone())) } } } diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 8a4aa2a2c9bbb..fef84bda86974 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -62,25 
+62,21 @@ pub fn load_epoch_changes( let version = load_decode::<_, u32>(backend, BABE_EPOCH_CHANGES_VERSION)?; let maybe_epoch_changes = match version { - None => { + None => load_decode::<_, EpochChangesV0For>(backend, BABE_EPOCH_CHANGES_KEY)? - .map(|v0| v0.migrate().map(|_, _, epoch| epoch.migrate(config))) - }, - Some(1) => { + .map(|v0| v0.migrate().map(|_, _, epoch| epoch.migrate(config))), + Some(1) => load_decode::<_, EpochChangesV1For>(backend, BABE_EPOCH_CHANGES_KEY)? - .map(|v1| v1.migrate().map(|_, _, epoch| epoch.migrate(config))) - }, + .map(|v1| v1.migrate().map(|_, _, epoch| epoch.migrate(config))), Some(2) => { // v2 still uses `EpochChanges` v1 format but with a different `Epoch` type. load_decode::<_, EpochChangesV1For>(backend, BABE_EPOCH_CHANGES_KEY)? .map(|v2| v2.migrate()) }, - Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => { - load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES_KEY)? - }, - Some(other) => { - return Err(ClientError::Backend(format!("Unsupported BABE DB version: {:?}", other))) - }, + Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => + load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES_KEY)?, + Some(other) => + return Err(ClientError::Backend(format!("Unsupported BABE DB version: {:?}", other))), }; let epoch_changes = @@ -203,8 +199,8 @@ mod test { .tree() .iter() .map(|(_, _, epoch)| epoch.clone()) - .collect::>() - == vec![PersistedEpochHeader::Regular(EpochHeader { + .collect::>() == + vec![PersistedEpochHeader::Regular(EpochHeader { start_slot: 0.into(), end_slot: 100.into(), })], diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 1aa3bf27de484..109e5aade02a7 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -359,11 +359,10 @@ where } }, Some(2) => runtime_api.configuration(&block_id)?, - _ => { + _ => return Err(sp_blockchain::Error::VersionInvalid( "Unsupported or invalid BabeApi version".to_string(), - )) - }, + )), }; Ok(config) } 
@@ -562,7 +561,7 @@ fn aux_storage_cleanup + HeaderBackend, Block: B .flatten() .map_or(true, |h| h == hash) { - break; + break } }, Err(err) => { @@ -572,7 +571,7 @@ fn aux_storage_cleanup + HeaderBackend, Block: B hash, err, ); - break; + break }, } } @@ -799,14 +798,13 @@ where let sinks = &mut self.slot_notification_sinks.lock(); sinks.retain_mut(|sink| match sink.try_send((slot, epoch_descriptor.clone())) { Ok(()) => true, - Err(e) => { + Err(e) => if e.is_full() { warn!(target: "babe", "Trying to notify a slot but the channel is full"); true } else { false - } - }, + }, }); } @@ -875,7 +873,7 @@ where self.client.info().finalized_number, slot, self.logging_target(), - ); + ) } } false @@ -924,7 +922,7 @@ pub fn find_pre_digest(header: &B::Header) -> Result = None; @@ -948,9 +946,8 @@ fn find_next_epoch_digest( trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); match (log, epoch_digest.is_some()) { - (Some(ConsensusLog::NextEpochData(_)), true) => { - return Err(babe_err(Error::MultipleEpochChangeDigests)) - }, + (Some(ConsensusLog::NextEpochData(_)), true) => + return Err(babe_err(Error::MultipleEpochChangeDigests)), (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), _ => trace!(target: "babe", "Ignoring digest not meant for us"), } @@ -968,9 +965,8 @@ fn find_next_config_digest( trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); match (log, config_digest.is_some()) { - (Some(ConsensusLog::NextConfigData(_)), true) => { - return Err(babe_err(Error::MultipleConfigChangeDigests)) - }, + (Some(ConsensusLog::NextConfigData(_)), true) => + return Err(babe_err(Error::MultipleConfigChangeDigests)), (Some(ConsensusLog::NextConfigData(config)), false) => config_digest = Some(config), _ => trace!(target: "babe", "Ignoring digest not 
meant for us"), } @@ -1053,7 +1049,7 @@ where // don't report any equivocations during initial sync // as they are most likely stale. if *origin == BlockOrigin::NetworkInitialSync { - return Ok(()); + return Ok(()) } // check if authorship of this header is an equivocation and return a proof if so. @@ -1103,7 +1099,7 @@ where Some(proof) => proof, None => { debug!(target: "babe", "Equivocation offender is not part of the authority set."); - return Ok(()); + return Ok(()) }, }, }; @@ -1164,7 +1160,7 @@ where // read it from the state after import. We also skip all verifications // because there's no parent state and we trust the sync module to verify // that the state is correct and finalized. - return Ok((block, Default::default())); + return Ok((block, Default::default())) } debug!(target: "babe", "We have {:?} logs in this header", block.header.digest().logs().len()); @@ -1365,12 +1361,11 @@ where let import_result = self.inner.import_block(block, new_cache).await; let aux = match import_result { Ok(ImportResult::Imported(aux)) => aux, - Ok(r) => { + Ok(r) => return Err(ConsensusError::ClientImport(format!( "Unexpected import result: {:?}", r - ))) - }, + ))), Err(r) => return Err(r.into()), }; @@ -1426,14 +1421,14 @@ where // When re-importing existing block strip away intermediates. 
let _ = block.remove_intermediate::>(INTERMEDIATE_KEY); block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); - return self.inner.import_block(block, new_cache).await.map_err(Into::into); + return self.inner.import_block(block, new_cache).await.map_err(Into::into) }, Ok(sp_blockchain::BlockStatus::Unknown) => {}, Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), } if block.with_state() { - return self.import_state(block, new_cache).await; + return self.import_state(block, new_cache).await } let pre_digest = find_pre_digest::(&block.header).expect( @@ -1461,7 +1456,7 @@ where if slot <= parent_slot { return Err(ConsensusError::ClientImport( babe_err(Error::::SlotMustIncrease(parent_slot, slot)).into(), - )); + )) } // if there's a pending epoch we'll save the previous epoch changes here @@ -1511,21 +1506,18 @@ where match (first_in_epoch, next_epoch_digest.is_some(), next_config_digest.is_some()) { (true, true, _) => {}, (false, false, false) => {}, - (false, false, true) => { + (false, false, true) => return Err(ConsensusError::ClientImport( babe_err(Error::::UnexpectedConfigChange).into(), - )) - }, - (true, false, _) => { + )), + (true, false, _) => return Err(ConsensusError::ClientImport( babe_err(Error::::ExpectedEpochChange(hash, slot)).into(), - )) - }, - (false, true, _) => { + )), + (false, true, _) => return Err(ConsensusError::ClientImport( babe_err(Error::::UnexpectedEpochChange).into(), - )) - }, + )), } let info = self.client.info(); @@ -1598,7 +1590,7 @@ where debug!(target: "babe", "Failed to launch next epoch: {}", e); *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); - return Err(e); + return Err(e) } crate::aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { @@ -1822,7 +1814,7 @@ where let revertible = blocks.min(best_number - finalized); if revertible == Zero::zero() { - return Ok(()); + return Ok(()) } let revert_up_to_number = best_number - revertible; @@ -1858,11 +1850,11 @@ 
where let mut hash = leaf; loop { let meta = client.header_metadata(hash)?; - if meta.number <= revert_up_to_number - || !weight_keys.insert(aux_schema::block_weight_key(hash)) + if meta.number <= revert_up_to_number || + !weight_keys.insert(aux_schema::block_weight_key(hash)) { // We've reached the revert point or an already processed branch, stop here. - break; + break } hash = meta.parent; } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 64a9520a77321..8bef1b38b929d 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -582,7 +582,7 @@ fn can_author_block() { None => i += 1, Some(s) => { debug!(target: "babe", "Authored block {:?}", s.0); - break; + break }, } } diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index 3592a18e7b548..53ec3002e6a85 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -83,7 +83,7 @@ pub(super) fn check_header( if pre_digest.slot() > slot_now { header.digest_mut().push(seal); - return Ok(CheckedHeader::Deferred(header, pre_digest.slot())); + return Ok(CheckedHeader::Deferred(header, pre_digest.slot())) } let author = match authorities.get(pre_digest.authority_index() as usize) { @@ -168,7 +168,7 @@ fn check_primary_header( calculate_primary_threshold(c, &epoch.authorities, pre_digest.authority_index as usize); if !check_primary_threshold(&inout, threshold) { - return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))); + return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))) } Ok(()) @@ -196,7 +196,7 @@ fn check_secondary_plain_header( let author = &epoch.authorities[pre_digest.authority_index as usize].0; if expected_author != author { - return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); + return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())) } if 
AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { @@ -222,7 +222,7 @@ fn check_secondary_vrf_header( let author = &epoch.authorities[pre_digest.authority_index as usize].0; if expected_author != author { - return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); + return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())) } if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs index dc9908ad3df34..f888176addd2d 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -157,9 +157,9 @@ impl StateAction { /// Check if execution checks that require runtime calls should be skipped. pub fn skip_execution_checks(&self) -> bool { match self { - StateAction::ApplyChanges(_) - | StateAction::Execute - | StateAction::ExecuteIfPossible => false, + StateAction::ApplyChanges(_) | + StateAction::Execute | + StateAction::ExecuteIfPossible => false, StateAction::Skip => true, } } @@ -270,9 +270,8 @@ impl BlockImportParams { ) -> BlockImportParams { // Preserve imported state. let state_action = match self.state_action { - StateAction::ApplyChanges(StorageChanges::Import(state)) => { - StateAction::ApplyChanges(StorageChanges::Import(state)) - }, + StateAction::ApplyChanges(StorageChanges::Import(state)) => + StateAction::ApplyChanges(StorageChanges::Import(state)), StateAction::ApplyChanges(StorageChanges::Changes(_)) => StateAction::Skip, StateAction::Execute => StateAction::Execute, StateAction::ExecuteIfPossible => StateAction::ExecuteIfPossible, diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs index 36e1dc79fc72e..3741fa99663cd 100644 --- a/client/consensus/common/src/import_queue.rs +++ b/client/consensus/common/src/import_queue.rs @@ -164,8 +164,8 @@ impl BlockImportStatus { /// Returns the imported block number. 
pub fn number(&self) -> &N { match self { - BlockImportStatus::ImportedKnown(n, _) - | BlockImportStatus::ImportedUnknown(n, _, _) => n, + BlockImportStatus::ImportedKnown(n, _) | + BlockImportStatus::ImportedUnknown(n, _, _) => n, } } } @@ -236,7 +236,7 @@ pub(crate) async fn import_single_block_metered< } else { debug!(target: "sync", "Header {} was not provided ", block.hash); } - return Err(BlockImportError::IncompleteHeader(peer)); + return Err(BlockImportError::IncompleteHeader(peer)) }, }; @@ -251,9 +251,8 @@ pub(crate) async fn import_single_block_metered< trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); Ok(BlockImportStatus::ImportedKnown(number, peer)) }, - Ok(ImportResult::Imported(aux)) => { - Ok(BlockImportStatus::ImportedUnknown(number, aux, peer)) - }, + Ok(ImportResult::Imported(aux)) => + Ok(BlockImportStatus::ImportedUnknown(number, aux, peer)), Ok(ImportResult::MissingState) => { debug!(target: "sync", "Parent state is missing for {}: {:?}, parent: {:?}", number, hash, parent_hash); diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs index 7bf591a1f174e..0e607159b75c3 100644 --- a/client/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -102,7 +102,7 @@ impl BasicQueue { impl ImportQueue for BasicQueue { fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { if blocks.is_empty() { - return; + return } trace!(target: "sync", "Scheduling {} blocks for import", blocks.len()); @@ -183,7 +183,7 @@ async fn block_import_process( target: "block-import", "Stopping block import because the import channel was closed!", ); - return; + return }, }; @@ -257,27 +257,26 @@ impl BlockImportWorker { target: "block-import", "Stopping block import because result channel was closed!", ); - return; + return } // Make sure to first process all justifications while let Poll::Ready(justification) = 
futures::poll!(justification_port.next()) { match justification { - Some(ImportJustification(who, hash, number, justification)) => { - worker.import_justification(who, hash, number, justification).await - }, + Some(ImportJustification(who, hash, number, justification)) => + worker.import_justification(who, hash, number, justification).await, None => { log::debug!( target: "block-import", "Stopping block import because justification channel was closed!", ); - return; + return }, } } if let Poll::Ready(()) = futures::poll!(&mut block_import_process) { - return; + return } // All futures that we polled are now pending. @@ -371,7 +370,7 @@ async fn import_many_blocks, Transaction: Send + 'stat Some(b) => b, None => { // No block left to import, success! - return ImportManyBlocksResult { block_count: count, imported, results }; + return ImportManyBlocksResult { block_count: count, imported, results } }, }; diff --git a/client/consensus/common/src/import_queue/buffered_link.rs b/client/consensus/common/src/import_queue/buffered_link.rs index 94cd12ef7d3b0..5d418dddf0853 100644 --- a/client/consensus/common/src/import_queue/buffered_link.rs +++ b/client/consensus/common/src/import_queue/buffered_link.rs @@ -139,15 +139,12 @@ impl BufferedLinkReceiver { }; match msg { - BlockImportWorkerMsg::BlocksProcessed(imported, count, results) => { - link.blocks_processed(imported, count, results) - }, - BlockImportWorkerMsg::JustificationImported(who, hash, number, success) => { - link.justification_imported(who, &hash, number, success) - }, - BlockImportWorkerMsg::RequestJustification(hash, number) => { - link.request_justification(&hash, number) - }, + BlockImportWorkerMsg::BlocksProcessed(imported, count, results) => + link.blocks_processed(imported, count, results), + BlockImportWorkerMsg::JustificationImported(who, hash, number, success) => + link.justification_imported(who, &hash, number, success), + BlockImportWorkerMsg::RequestJustification(hash, number) => + 
link.request_justification(&hash, number), } } } diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index e10d07c146a49..c213a49b8e4e4 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -199,9 +199,8 @@ where pub fn increment(&self, next_descriptor: E::NextEpochDescriptor) -> IncrementedEpoch { let next = self.as_ref().increment(next_descriptor); let to_persist = match *self { - ViableEpoch::UnimportedGenesis(ref epoch_0) => { - PersistedEpoch::Genesis(epoch_0.clone(), next) - }, + ViableEpoch::UnimportedGenesis(ref epoch_0) => + PersistedEpoch::Genesis(epoch_0.clone(), next), ViableEpoch::Signaled(_) => PersistedEpoch::Regular(next), }; @@ -247,9 +246,8 @@ impl PersistedEpoch { impl<'a, E: Epoch> From<&'a PersistedEpoch> for PersistedEpochHeader { fn from(epoch: &'a PersistedEpoch) -> Self { match epoch { - PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => { - PersistedEpochHeader::Genesis(epoch_0.into(), epoch_1.into()) - }, + PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => + PersistedEpochHeader::Genesis(epoch_0.into(), epoch_1.into()), PersistedEpoch::Regular(ref epoch_n) => PersistedEpochHeader::Regular(epoch_n.into()), } } @@ -263,9 +261,8 @@ impl PersistedEpoch { F: FnMut(&Hash, &Number, E) -> B, { match self { - PersistedEpoch::Genesis(epoch_0, epoch_1) => { - PersistedEpoch::Genesis(f(h, n, epoch_0), f(h, n, epoch_1)) - }, + PersistedEpoch::Genesis(epoch_0, epoch_1) => + PersistedEpoch::Genesis(f(h, n, epoch_0), f(h, n, epoch_1)), PersistedEpoch::Regular(epoch_n) => PersistedEpoch::Regular(f(h, n, epoch_n)), } } @@ -350,40 +347,33 @@ where match &self.current { (_, _, PersistedEpoch::Genesis(epoch_0, _)) if slot >= epoch_0.start_slot() && slot < epoch_0.end_slot() => - { return Some(( self.current.0, self.current.1, epoch_0.into(), EpochIdentifierPosition::Genesis0, - )) - }, + )), (_, _, PersistedEpoch::Genesis(_, epoch_1)) if slot >= epoch_1.start_slot() && slot < 
epoch_1.end_slot() => - { return Some(( self.current.0, self.current.1, epoch_1.into(), EpochIdentifierPosition::Genesis1, - )) - }, + )), (_, _, PersistedEpoch::Regular(epoch_n)) if slot >= epoch_n.start_slot() && slot < epoch_n.end_slot() => - { return Some(( self.current.0, self.current.1, epoch_n.into(), EpochIdentifierPosition::Regular, - )) - }, + )), _ => {}, }; match &self.next { - Some((h, n, epoch_n)) if slot >= epoch_n.start_slot() && slot < epoch_n.end_slot() => { - Some((*h, *n, epoch_n.into(), EpochIdentifierPosition::Regular)) - }, + Some((h, n, epoch_n)) if slot >= epoch_n.start_slot() && slot < epoch_n.end_slot() => + Some((*h, *n, epoch_n.into(), EpochIdentifierPosition::Regular)), _ => None, } } @@ -394,27 +384,19 @@ where ((h, n, e), _) if h == &id.hash && n == &id.number => match e { PersistedEpoch::Genesis(ref epoch_0, _) if id.position == EpochIdentifierPosition::Genesis0 => - { - Some(epoch_0) - }, + Some(epoch_0), PersistedEpoch::Genesis(_, ref epoch_1) if id.position == EpochIdentifierPosition::Genesis1 => - { - Some(epoch_1) - }, + Some(epoch_1), PersistedEpoch::Regular(ref epoch_n) if id.position == EpochIdentifierPosition::Regular => - { - Some(epoch_n) - }, + Some(epoch_n), _ => None, }, (_, Some((h, n, e))) - if h == &id.hash - && n == &id.number && id.position == EpochIdentifierPosition::Regular => - { - Some(e) - }, + if h == &id.hash && + n == &id.number && id.position == EpochIdentifierPosition::Regular => + Some(e), _ => None, } } @@ -555,24 +537,18 @@ where /// Get a reference to an epoch with given identifier. 
pub fn epoch(&self, id: &EpochIdentifier) -> Option<&E> { if let Some(e) = &self.gap.as_ref().and_then(|gap| gap.epoch(id)) { - return Some(e); + return Some(e) } self.epochs.get(&(id.hash, id.number)).and_then(|v| match v { PersistedEpoch::Genesis(ref epoch_0, _) if id.position == EpochIdentifierPosition::Genesis0 => - { - Some(epoch_0) - }, + Some(epoch_0), PersistedEpoch::Genesis(_, ref epoch_1) if id.position == EpochIdentifierPosition::Genesis1 => - { - Some(epoch_1) - }, + Some(epoch_1), PersistedEpoch::Regular(ref epoch_n) if id.position == EpochIdentifierPosition::Regular => - { - Some(epoch_n) - }, + Some(epoch_n), _ => None, }) } @@ -587,12 +563,10 @@ where G: FnOnce(E::Slot) -> E, { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot) => { - Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))) - }, - ViableEpochDescriptor::Signaled(identifier, _) => { - self.epoch(identifier).map(ViableEpoch::Signaled) - }, + ViableEpochDescriptor::UnimportedGenesis(slot) => + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))), + ViableEpochDescriptor::Signaled(identifier, _) => + self.epoch(identifier).map(ViableEpoch::Signaled), } } @@ -601,19 +575,13 @@ where self.epochs.get_mut(&(id.hash, id.number)).and_then(|v| match v { PersistedEpoch::Genesis(ref mut epoch_0, _) if id.position == EpochIdentifierPosition::Genesis0 => - { - Some(epoch_0) - }, + Some(epoch_0), PersistedEpoch::Genesis(_, ref mut epoch_1) if id.position == EpochIdentifierPosition::Genesis1 => - { - Some(epoch_1) - }, + Some(epoch_1), PersistedEpoch::Regular(ref mut epoch_n) if id.position == EpochIdentifierPosition::Regular => - { - Some(epoch_n) - }, + Some(epoch_n), _ => None, }) } @@ -628,12 +596,10 @@ where G: FnOnce(E::Slot) -> E, { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot) => { - Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))) - }, - ViableEpochDescriptor::Signaled(identifier, _) => { - 
self.epoch_mut(identifier).map(ViableEpoch::Signaled) - }, + ViableEpochDescriptor::UnimportedGenesis(slot) => + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))), + ViableEpochDescriptor::Signaled(identifier, _) => + self.epoch_mut(identifier).map(ViableEpoch::Signaled), } } @@ -696,7 +662,7 @@ where ) -> Result>, fork_tree::Error> { if parent_number == Zero::zero() { // need to insert the genesis epoch. - return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot))); + return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot))) } if let Some(gap) = &self.gap { @@ -704,7 +670,7 @@ where return Ok(Some(ViableEpochDescriptor::Signaled( EpochIdentifier { position, hash, number }, hdr, - ))); + ))) } } @@ -748,9 +714,8 @@ where (EpochIdentifierPosition::Genesis0, epoch_0.clone()) } }, - PersistedEpochHeader::Regular(ref epoch_n) => { - (EpochIdentifierPosition::Regular, epoch_n.clone()) - }, + PersistedEpochHeader::Regular(ref epoch_n) => + (EpochIdentifierPosition::Regular, epoch_n.clone()), }, node, ) @@ -790,15 +755,15 @@ where Err(e) => PersistedEpoch::Regular(e), } } - } else if epoch.is_genesis() - && !self.epochs.is_empty() - && !self.epochs.values().any(|e| e.is_genesis()) + } else if epoch.is_genesis() && + !self.epochs.is_empty() && + !self.epochs.values().any(|e| e.is_genesis()) { // There's a genesis epoch imported when we already have an active epoch. // This happens after the warp sync as the ancient blocks download start. // We need to start tracking gap epochs here. 
self.gap = Some(GapEpochs { current: (hash, number, epoch), next: None }); - return Ok(()); + return Ok(()) } let res = self.inner.import(hash, number, header, &is_descendent_of); @@ -844,8 +809,8 @@ where let is_descendent_of = descendent_of_builder.build_is_descendent_of(None); let filter = |node_hash: &Hash, node_num: &Number, _: &PersistedEpochHeader| { - if number >= *node_num - && (is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash) + if number >= *node_num && + (is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash) { // Continue the search in this subtree. FilterAction::KeepNode @@ -907,7 +872,7 @@ mod tests { if let Some((ref c_head, ref c_parent)) = current { if head == c_head { if base == c_parent { - return Ok(true); + return Ok(true) } else { head = c_parent; } diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 0cf33beddbbe4..206f5163a13cd 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -149,7 +149,7 @@ where authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, ) -> Result { if authorities.is_empty() { - return Err(Error::StringError("Cannot supply empty authority set!".into())); + return Err(Error::StringError("Cannot supply empty authority set!".into())) } let config = sc_consensus_babe::configuration(&*client)?; @@ -299,15 +299,14 @@ where // manually hard code epoch descriptor epoch_descriptor = match epoch_descriptor { - ViableEpochDescriptor::Signaled(identifier, _header) => { + ViableEpochDescriptor::Signaled(identifier, _header) => ViableEpochDescriptor::Signaled( identifier, EpochHeader { start_slot: slot, end_slot: (*slot * self.config.epoch_length).into(), }, - ) - }, + ), _ => unreachable!( "we're not in the authorities, so this isn't the genesis epoch; qed" ), diff --git a/client/consensus/manual-seal/src/seal_block.rs 
b/client/consensus/manual-seal/src/seal_block.rs index c5dc5fd09a28f..32e3acf68506e 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -95,7 +95,7 @@ pub async fn seal_block( { let future = async { if pool.status().ready == 0 && !create_empty { - return Err(Error::EmptyTransactionPool); + return Err(Error::EmptyTransactionPool) } // get the header to build this new block on. @@ -135,7 +135,7 @@ pub async fn seal_block( .await?; if proposal.block.extrinsics().len() == inherents_len && !create_empty { - return Err(Error::EmptyTransactionPool); + return Err(Error::EmptyTransactionPool) } let (header, body) = proposal.block.deconstruct(); @@ -158,9 +158,8 @@ pub async fn seal_block( post_header.digest_mut().logs.extend(params.post_digests.iter().cloned()); match block_import.import_block(params, HashMap::new()).await? { - ImportResult::Imported(aux) => { - Ok(CreatedBlock { hash: ::Header::hash(&post_header), aux }) - }, + ImportResult::Imported(aux) => + Ok(CreatedBlock { hash: ::Header::hash(&post_header), aux }), other => Err(other.into()), } }; diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index cffc0fe3959af..dcf069d617bab 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -270,7 +270,7 @@ where execution_context: ExecutionContext, ) -> Result<(), Error> { if *block.header().number() < self.check_inherents_after { - return Ok(()); + return Ok(()) } let inherent_data = inherent_data_providers @@ -372,7 +372,7 @@ where &inner_seal, difficulty, )? 
{ - return Err(Error::::InvalidSeal.into()); + return Err(Error::::InvalidSeal.into()) } aux.difficulty = difficulty; @@ -417,20 +417,19 @@ impl PowVerifier { let hash = header.hash(); let (seal, inner_seal) = match header.digest_mut().pop() { - Some(DigestItem::Seal(id, seal)) => { + Some(DigestItem::Seal(id, seal)) => if id == POW_ENGINE_ID { (DigestItem::Seal(id, seal.clone()), seal) } else { - return Err(Error::WrongEngine(id)); - } - }, + return Err(Error::WrongEngine(id)) + }, _ => return Err(Error::HeaderUnsealed(hash)), }; let pre_hash = header.hash(); if !self.algorithm.preliminary_verify(&pre_hash, &inner_seal)?.unwrap_or(true) { - return Err(Error::FailedPreliminaryVerify); + return Err(Error::FailedPreliminaryVerify) } Ok((header, seal)) @@ -527,13 +526,13 @@ where let task = async move { loop { if timer.next().await.is_none() { - break; + break } if sync_oracle.is_major_syncing() { debug!(target: "pow", "Skipping proposal due to sync."); worker.on_major_syncing(); - continue; + continue } let best_header = match select_chain.best_chain().await { @@ -545,13 +544,13 @@ where Select best chain error: {}", err ); - continue; + continue }, }; let best_hash = best_header.hash(); if worker.best_hash() == Some(best_hash) { - continue; + continue } // The worker is locked for the duration of the whole proposing period. 
Within this @@ -566,7 +565,7 @@ where Fetch difficulty failed: {}", err, ); - continue; + continue }, }; @@ -582,7 +581,7 @@ where Creating inherent data providers failed: {}", err, ); - continue; + continue }, }; @@ -595,7 +594,7 @@ where Creating inherent data failed: {}", e, ); - continue; + continue }, }; @@ -615,7 +614,7 @@ where Creating proposer failed: {:?}", err, ); - continue; + continue }, }; @@ -629,7 +628,7 @@ where Creating proposal failed: {}", err, ); - continue; + continue }, }; @@ -656,9 +655,8 @@ fn find_pre_digest(header: &B::Header) -> Result>, Err for log in header.digest().logs() { trace!(target: "pow", "Checking log {:?}, looking for pre runtime digest", log); match (log, pre_digest.is_some()) { - (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => { - return Err(Error::MultiplePreRuntimeDigests) - }, + (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => + return Err(Error::MultiplePreRuntimeDigests), (DigestItem::PreRuntime(POW_ENGINE_ID, v), false) => { pre_digest = Some(v.clone()); }, @@ -672,13 +670,12 @@ fn find_pre_digest(header: &B::Header) -> Result>, Err /// Fetch PoW seal. 
fn fetch_seal(digest: Option<&DigestItem>, hash: B::Hash) -> Result, Error> { match digest { - Some(DigestItem::Seal(id, seal)) => { + Some(DigestItem::Seal(id, seal)) => if id == &POW_ENGINE_ID { Ok(seal.clone()) } else { Err(Error::::WrongEngine(*id)) - } - }, + }, _ => Err(Error::::HeaderUnsealed(hash)), } } diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index 3e7fab50a610e..a00da6e7022fb 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -163,7 +163,7 @@ where target: "pow", "Unable to import mined block: seal is invalid", ); - return false; + return false }, Err(err) => { warn!( @@ -171,7 +171,7 @@ where "Unable to import mined block: {}", err, ); - return false; + return false }, } } else { @@ -179,7 +179,7 @@ where target: "pow", "Unable to import mined block: metadata does not exist", ); - return false; + return false } let build = if let Some(build) = { @@ -196,7 +196,7 @@ where target: "pow", "Unable to import mined block: build does not exist", ); - return false; + return false }; let seal = DigestItem::Seal(POW_ENGINE_ID, seal); diff --git a/client/consensus/slots/src/aux_schema.rs b/client/consensus/slots/src/aux_schema.rs index 378ad24c95c4f..c1d01500ffe47 100644 --- a/client/consensus/slots/src/aux_schema.rs +++ b/client/consensus/slots/src/aux_schema.rs @@ -64,7 +64,7 @@ where { // We don't check equivocations for old headers out of our capacity. if slot_now.saturating_sub(*slot) > MAX_SLOT_CAPACITY { - return Ok(None); + return Ok(None) } // Key for this slot. @@ -81,7 +81,7 @@ where if slot_now < first_saved_slot { // The code below assumes that slots will be visited sequentially. - return Ok(None); + return Ok(None) } for (prev_header, prev_signer) in headers_with_sig.iter() { @@ -101,7 +101,7 @@ where // since it's already saved and a possible equivocation // would have been detected before. 
Ok(None) - }; + } } } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index a34cc85902451..90bfef6c1609c 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -215,7 +215,7 @@ pub trait SimpleSlotWorker { Either::Left((Err(err), _)) => { warn!(target: logging_target, "Proposing failed: {}", err); - return None; + return None }, Either::Right(_) => { info!( @@ -235,7 +235,7 @@ pub trait SimpleSlotWorker { "slot" => *slot, ); - return None; + return None }, }; @@ -262,7 +262,7 @@ pub trait SimpleSlotWorker { "Skipping proposal slot {} since there's no time left to propose", slot, ); - return None; + return None } else { Delay::new(proposing_remaining_duration) }; @@ -285,7 +285,7 @@ pub trait SimpleSlotWorker { "err" => ?err, ); - return None; + return None }, }; @@ -293,9 +293,9 @@ pub trait SimpleSlotWorker { let authorities_len = self.authorities_len(&aux_data); - if !self.force_authoring() - && self.sync_oracle().is_offline() - && authorities_len.map(|a| a > 1).unwrap_or(false) + if !self.force_authoring() && + self.sync_oracle().is_offline() && + authorities_len.map(|a| a > 1).unwrap_or(false) { debug!(target: logging_target, "Skipping proposal slot. 
Waiting for the network."); telemetry!( @@ -305,13 +305,13 @@ pub trait SimpleSlotWorker { "authorities_len" => authorities_len, ); - return None; + return None } let claim = self.claim_slot(&slot_info.chain_head, slot, &aux_data).await?; if self.should_backoff(slot, &slot_info.chain_head) { - return None; + return None } debug!(target: logging_target, "Starting authorship at slot: {slot}"); @@ -331,7 +331,7 @@ pub trait SimpleSlotWorker { "err" => ?err ); - return None; + return None }, }; @@ -358,7 +358,7 @@ pub trait SimpleSlotWorker { Err(err) => { warn!(target: logging_target, "Failed to create block import params: {}", err); - return None; + return None }, }; @@ -484,13 +484,13 @@ pub async fn start_slot_worker( Ok(r) => r, Err(e) => { warn!(target: "slots", "Error while polling for next slot: {}", e); - return; + return }, }; if sync_oracle.is_major_syncing() { debug!(target: "slots", "Skipping proposal slot due to sync."); - continue; + continue } let _ = worker.on_slot(slot_info).await; @@ -570,7 +570,7 @@ pub fn proposing_remaining_duration( // If parent is genesis block, we don't require any lenience factor. if slot_info.chain_head.number().is_zero() { - return proposing_duration; + return proposing_duration } let parent_slot = match parent_slot { @@ -733,7 +733,7 @@ where ) -> bool { // This should not happen, but we want to keep the previous behaviour if it does. if slot_now <= chain_head_slot { - return false; + return false } // There can be race between getting the finalized number and getting the best number. diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index 2bbdb0b7e0ee0..f3dc485a8e819 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -155,7 +155,7 @@ where ); // Let's try at the next slot.. 
self.inner_delay.take(); - continue; + continue }, }; @@ -178,7 +178,7 @@ where if slot > self.last_slot { self.last_slot = slot; - break Ok(SlotInfo::new(slot, inherent_data, self.slot_duration, chain_head, None)); + break Ok(SlotInfo::new(slot, inherent_data, self.slot_duration, chain_head, None)) } } } diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index db6d6cb7146ca..13d91fff0b555 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -177,7 +177,7 @@ impl BenchmarkingState { // Childtrie is identified by its storage key (i.e. `ChildInfo::storage_key`) fn add_read_key(&self, childtrie: Option<&[u8]>, key: &[u8]) { if !self.enable_tracking { - return; + return } let mut child_key_tracker = self.child_key_tracker.borrow_mut(); @@ -218,7 +218,7 @@ impl BenchmarkingState { // Childtrie is identified by its storage key (i.e. `ChildInfo::storage_key`) fn add_write_key(&self, childtrie: Option<&[u8]>, key: &[u8]) { if !self.enable_tracking { - return; + return } let mut child_key_tracker = self.child_key_tracker.borrow_mut(); @@ -489,7 +489,7 @@ impl StateBackend> for BenchmarkingState { }) }); } else { - return Err("Trying to commit to a closed db".into()); + return Err("Trying to commit to a closed db".into()) } self.reopen() } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index c4ed98e54d553..305db2284b2ed 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -529,16 +529,15 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha BlockId::Hash(h) => { let mut cache = self.header_cache.lock(); if let Some(result) = cache.get_refresh(h) { - return Ok(result.clone()); + return Ok(result.clone()) } let header = utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?; cache_header(&mut cache, *h, header.clone()); Ok(header) }, - BlockId::Number(_) => { - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) - }, + BlockId::Number(_) => + utils::read_header(&*self.db, 
columns::KEY_LOOKUP, columns::HEADER, id), } } @@ -585,12 +584,11 @@ impl sc_client_api::blockchain::Backend for BlockchainDb return Ok(Some(body)), - Err(err) => { + Err(err) => return Err(sp_blockchain::Error::Backend(format!( "Error decoding body: {}", err - ))) - }, + ))), } } @@ -620,12 +618,11 @@ impl sc_client_api::blockchain::Backend for BlockchainDb { + None => return Err(sp_blockchain::Error::Backend(format!( "Missing indexed transaction {:?}", hash - ))) - }, + ))), }; }, DbExtrinsic::Full(ex) => { @@ -633,14 +630,13 @@ impl sc_client_api::blockchain::Backend for BlockchainDb { + Err(err) => return Err(sp_blockchain::Error::Backend(format!( "Error decoding body list: {}", err - ))) - }, + ))), } } Ok(None) @@ -655,12 +651,11 @@ impl sc_client_api::blockchain::Backend for BlockchainDb match Decode::decode(&mut &justifications[..]) { Ok(justifications) => Ok(Some(justifications)), - Err(err) => { + Err(err) => return Err(sp_blockchain::Error::Backend(format!( "Error decoding justifications: {}", err - ))) - }, + ))), }, None => Ok(None), } @@ -716,20 +711,18 @@ impl sc_client_api::blockchain::Backend for BlockchainDb transactions.push(t), - None => { + None => return Err(sp_blockchain::Error::Backend(format!( "Missing indexed transaction {:?}", hash - ))) - }, + ))), } } } Ok(Some(transactions)) }, - Err(err) => { - Err(sp_blockchain::Error::Backend(format!("Error decoding body list: {}", err))) - }, + Err(err) => + Err(sp_blockchain::Error::Backend(format!("Error decoding body list: {}", err))), } } } @@ -793,9 +786,8 @@ impl BlockImportOperation { count += 1; let key = crate::offchain::concatenate_prefix_and_key(&prefix, &key); match value_operation { - OffchainOverlayedChange::SetValue(val) => { - transaction.set_from_vec(columns::OFFCHAIN, &key, val) - }, + OffchainOverlayedChange::SetValue(val) => + transaction.set_from_vec(columns::OFFCHAIN, &key, val), OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key), } } @@ -820,7 
+812,7 @@ impl BlockImportOperation { state_version: StateVersion, ) -> ClientResult { if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(k)) { - return Err(sp_blockchain::Error::InvalidState); + return Err(sp_blockchain::Error::InvalidState) } let child_delta = storage.children_default.values().map(|child_content| { @@ -1180,9 +1172,9 @@ impl Backend { // Older DB versions have no last state key. Check if the state is available and set it. let info = backend.blockchain.info(); - if info.finalized_state.is_none() - && info.finalized_hash != Default::default() - && sc_client_api::Backend::have_state_at( + if info.finalized_state.is_none() && + info.finalized_hash != Default::default() && + sc_client_api::Backend::have_state_at( &backend, info.finalized_hash, info.finalized_number, @@ -1221,11 +1213,11 @@ impl Backend { let meta = self.blockchain.meta.read(); - if meta.best_number > best_number - && (meta.best_number - best_number).saturated_into::() - > self.canonicalization_delay + if meta.best_number > best_number && + (meta.best_number - best_number).saturated_into::() > + self.canonicalization_delay { - return Err(sp_blockchain::Error::SetHeadTooOld); + return Err(sp_blockchain::Error::SetHeadTooOld) } let parent_exists = @@ -1244,7 +1236,7 @@ impl Backend { (&r.number, &r.hash) ); - return Err(sp_blockchain::Error::NotInFinalizedChain); + return Err(sp_blockchain::Error::NotInFinalizedChain) } retracted.push(r.hash); @@ -1282,14 +1274,14 @@ impl Backend { ) -> ClientResult<()> { let last_finalized = last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash); - if last_finalized != self.blockchain.meta.read().genesis_hash - && *header.parent_hash() != last_finalized + if last_finalized != self.blockchain.meta.read().genesis_hash && + *header.parent_hash() != last_finalized { return Err(sp_blockchain::Error::NonSequentialFinalization(format!( "Last finalized {:?} not parent of {:?}", last_finalized, header.hash() - ))); + ))) } 
Ok(()) } @@ -1332,7 +1324,7 @@ impl Backend { let new_canonical = number_u64 - self.canonicalization_delay; if new_canonical <= self.storage.state_db.best_canonical().unwrap_or(0) { - return Ok(()); + return Ok(()) } let hash = if new_canonical == number_u64 { hash @@ -1349,7 +1341,7 @@ impl Backend { })? }; if !sc_client_api::Backend::have_state_at(self, hash, new_canonical.saturated_into()) { - return Ok(()); + return Ok(()) } trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); @@ -1532,8 +1524,8 @@ impl Backend { let finalized = number_u64 == 0 || pending_block.leaf_state.is_final(); finalized } else { - (number.is_zero() && last_finalized_num.is_zero()) - || pending_block.leaf_state.is_final() + (number.is_zero() && last_finalized_num.is_zero()) || + pending_block.leaf_state.is_final() }; let header = &pending_block.header; @@ -1622,9 +1614,11 @@ impl Backend { &(start, end).encode(), ); } - } else if number > best_num + One::one() - && number > One::one() - && self.blockchain.header(BlockId::hash(parent_hash))?.is_none() + } else if number > best_num + One::one() && + number > One::one() && self + .blockchain + .header(BlockId::hash(parent_hash))? 
+ .is_none() { let gap = (best_num + One::one(), number - One::one()); transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode()); @@ -1666,7 +1660,7 @@ impl Backend { return Err(sp_blockchain::Error::UnknownBlock(format!( "Cannot set head {:?}", set_head - ))); + ))) } } @@ -1709,9 +1703,8 @@ impl Backend { } transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - if sc_client_api::Backend::have_state_at(self, f_hash, f_num) - && self - .storage + if sc_client_api::Backend::have_state_at(self, f_hash, f_num) && + self.storage .state_db .best_canonical() .map(|c| f_num.saturated_into::() > c) @@ -1819,19 +1812,17 @@ impl Backend { id, )?; match Vec::>::decode(&mut &index[..]) { - Ok(index) => { + Ok(index) => for ex in index { if let DbExtrinsic::Indexed { hash, .. } = ex { transaction.release(columns::TRANSACTION, hash); } - } - }, - Err(err) => { + }, + Err(err) => return Err(sp_blockchain::Error::Backend(format!( "Error decoding body list: {}", err - ))) - }, + ))), } } Ok(()) @@ -2051,17 +2042,17 @@ impl sc_client_api::backend::Backend for Backend { let last_finalized = self.blockchain.last_finalized()?; // We can do a quick check first, before doing a proper but more expensive check - if number > self.blockchain.info().finalized_number - || (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?) + if number > self.blockchain.info().finalized_number || + (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?) { - return Err(ClientError::NotInFinalizedChain); + return Err(ClientError::NotInFinalizedChain) } let justifications = if let Some(mut stored_justifications) = self.blockchain.justifications(hash)? 
{ if !stored_justifications.append(justification) { - return Err(ClientError::BadJustification("Duplicate consensus engine ID".into())); + return Err(ClientError::BadJustification("Duplicate consensus engine ID".into())) } stored_justifications } else { @@ -2147,7 +2138,7 @@ impl sc_client_api::backend::Backend for Backend { let mut revert_blocks = || -> ClientResult> { for c in 0..n.saturated_into::() { if number_to_revert.is_zero() { - return Ok(c.saturated_into::>()); + return Ok(c.saturated_into::>()) } let mut transaction = Transaction::new(); let removed = @@ -2164,7 +2155,7 @@ impl sc_client_api::backend::Backend for Backend { if prev_number == best_number { best_hash } else { *removed.parent_hash() }; if !self.have_state_at(prev_hash, prev_number) { - return Ok(c.saturated_into::>()); + return Ok(c.saturated_into::>()) } match self.storage.state_db.revert_one() { @@ -2190,10 +2181,11 @@ impl sc_client_api::backend::Backend for Backend { reverted_finalized.insert(removed_hash); if let Some((hash, _)) = self.blockchain.info().finalized_state { if hash == hash_to_revert { - if !number_to_revert.is_zero() - && self - .have_state_at(prev_hash, number_to_revert - One::one()) - { + if !number_to_revert.is_zero() && + self.have_state_at( + prev_hash, + number_to_revert - One::one(), + ) { let lookup_key = utils::number_and_hash_to_lookup_key( number_to_revert - One::one(), prev_hash, @@ -2259,10 +2251,7 @@ impl sc_client_api::backend::Backend for Backend { let best_hash = self.blockchain.info().best_hash; if best_hash == hash { - return Err(sp_blockchain::Error::Backend(format!( - "Can't remove best block {:?}", - hash - ))); + return Err(sp_blockchain::Error::Backend(format!("Can't remove best block {:?}", hash))) } let hdr = self.blockchain.header_metadata(hash)?; @@ -2270,7 +2259,7 @@ impl sc_client_api::backend::Backend for Backend { return Err(sp_blockchain::Error::UnknownBlock(format!( "State already discarded for {:?}", hash - ))); + ))) } let mut leaves = 
self.blockchain.leaves.write(); @@ -2278,7 +2267,7 @@ impl sc_client_api::backend::Backend for Backend { return Err(sp_blockchain::Error::Backend(format!( "Can't remove non-leaf block {:?}", hash - ))); + ))) } let mut transaction = Transaction::new(); @@ -2318,7 +2307,7 @@ impl sc_client_api::backend::Backend for Backend { if let Some(outcome) = remove_outcome { leaves.undo().undo_remove(outcome); } - return Err(e.into()); + return Err(e.into()) } self.blockchain().remove_header_metadata(hash); Ok(()) @@ -2337,7 +2326,7 @@ impl sc_client_api::backend::Backend for Backend { .build(); let state = RefTrackingState::new(db_state, self.storage.clone(), None); - return Ok(RecordStatsState::new(state, None, self.state_usage.clone())); + return Ok(RecordStatsState::new(state, None, self.state_usage.clone())) } } diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs index 4fdf8ef6896dd..4adacbf6f041c 100644 --- a/client/db/src/parity_db.rs +++ b/client/db/src/parity_db.rs @@ -97,16 +97,15 @@ impl> Database for DbAdapter { Some(match change { Change::Set(col, key, value) => (col as u8, key, Some(value)), Change::Remove(col, key) => (col as u8, key, None), - Change::Store(col, key, value) => { + Change::Store(col, key, value) => if ref_counted_column(col) { (col as u8, key.as_ref().to_vec(), Some(value)) } else { if !not_ref_counted_column.contains(&col) { not_ref_counted_column.push(col); } - return None; - } - }, + return None + }, Change::Reference(col, key) => { if ref_counted_column(col) { // FIXME accessing value is not strictly needed, optimize this in parity-db. 
@@ -116,19 +115,18 @@ impl> Database for DbAdapter { if !not_ref_counted_column.contains(&col) { not_ref_counted_column.push(col); } - return None; + return None } }, - Change::Release(col, key) => { + Change::Release(col, key) => if ref_counted_column(col) { (col as u8, key.as_ref().to_vec(), None) } else { if !not_ref_counted_column.contains(&col) { not_ref_counted_column.push(col); } - return None; - } - }, + return None + }, }) })); @@ -136,7 +134,7 @@ impl> Database for DbAdapter { return Err(DatabaseError(Box::new(parity_db::Error::InvalidInput(format!( "Ref counted operation on non ref counted columns {:?}", not_ref_counted_column - ))))); + ))))) } result.map_err(|e| DatabaseError(Box::new(e))) diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 8350e06bd9ed1..51750bf689759 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -163,9 +163,8 @@ fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> Upgr /// If the file does not exist returns 0. fn current_version(path: &Path) -> UpgradeResult { match fs::File::open(version_file_path(path)) { - Err(ref err) if err.kind() == ErrorKind::NotFound => { - Err(UpgradeError::MissingDatabaseVersionFile) - }, + Err(ref err) if err.kind() == ErrorKind::NotFound => + Err(UpgradeError::MissingDatabaseVersionFile), Err(_) => Err(UpgradeError::UnknownDatabaseVersion), Ok(mut file) => { let mut s = String::new(); diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index e9f88a8dab663..567950d089e1b 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -193,12 +193,11 @@ fn open_database_at( let db: Arc> = match &db_source { DatabaseSource::ParityDb { path } => open_parity_db::(path, db_type, create)?, #[cfg(feature = "rocksdb")] - DatabaseSource::RocksDb { path, cache_size } => { - open_kvdb_rocksdb::(path, db_type, create, *cache_size)? 
- }, + DatabaseSource::RocksDb { path, cache_size } => + open_kvdb_rocksdb::(path, db_type, create, *cache_size)?, DatabaseSource::Custom { db, require_create_flag } => { if *require_create_flag && !create { - return Err(OpenDbError::DoesNotExist); + return Err(OpenDbError::DoesNotExist) } db.clone() }, @@ -206,9 +205,8 @@ fn open_database_at( // check if rocksdb exists first, if not, open paritydb match open_kvdb_rocksdb::(rocksdb_path, db_type, false, *cache_size) { Ok(db) => db, - Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) => { - open_parity_db::(paritydb_path, db_type, create)? - }, + Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) => + open_parity_db::(paritydb_path, db_type, create)?, Err(as_is) => return Err(as_is), } }, @@ -361,14 +359,13 @@ pub fn check_database_type( db_type: DatabaseType, ) -> Result<(), OpenDbError> { match db.get(COLUMN_META, meta_keys::TYPE) { - Some(stored_type) => { + Some(stored_type) => if db_type.as_str().as_bytes() != &*stored_type { return Err(OpenDbError::UnexpectedDbType { expected: db_type, found: stored_type.to_owned(), - }); - } - }, + }) + }, None => { let mut transaction = Transaction::new(); transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes()); @@ -390,8 +387,8 @@ fn maybe_migrate_to_type_subdir( // Do we have to migrate to a database-type-based subdirectory layout: // See if there's a file identifying a rocksdb or paritydb folder in the parent dir and // the target path ends in a role specific directory - if (basedir.join("db_version").exists() || basedir.join("metadata").exists()) - && (p.ends_with(DatabaseType::Full.as_str())) + if (basedir.join("db_version").exists() || basedir.join("metadata").exists()) && + (p.ends_with(DatabaseType::Full.as_str())) { // Try to open the database to check if the current `DatabaseType` matches the type of // database stored in the target directory and close the database on success. 
@@ -478,7 +475,7 @@ where { let genesis_hash: Block::Hash = match read_genesis_hash(db)? { Some(genesis_hash) => genesis_hash, - None => { + None => return Ok(Meta { best_hash: Default::default(), best_number: Zero::zero(), @@ -487,8 +484,7 @@ where genesis_hash: Default::default(), finalized_state: None, block_gap: None, - }) - }, + }), }; let load_meta_block = |desc, key| -> Result<_, sp_blockchain::Error> { @@ -542,9 +538,8 @@ pub fn read_genesis_hash( match db.get(COLUMN_META, meta_keys::GENESIS_HASH) { Some(h) => match Decode::decode(&mut &h[..]) { Ok(h) => Ok(Some(h)), - Err(err) => { - Err(sp_blockchain::Error::Backend(format!("Error decoding genesis hash: {}", err))) - }, + Err(err) => + Err(sp_blockchain::Error::Backend(format!("Error decoding genesis hash: {}", err))), }, None => Ok(None), } diff --git a/client/executor/benches/bench.rs b/client/executor/benches/bench.rs index 6a05ed9da47fd..a282cdfbdd334 100644 --- a/client/executor/benches/bench.rs +++ b/client/executor/benches/bench.rs @@ -237,7 +237,7 @@ fn bench_call_instance(c: &mut Criterion) { for thread_count in thread_counts { if thread_count > num_cpus { // If there are not enough cores available the benchmark is pointless. 
- continue; + continue } let benchmark_name = format!( diff --git a/client/executor/common/src/runtime_blob/data_segments_snapshot.rs b/client/executor/common/src/runtime_blob/data_segments_snapshot.rs index ba533d09db198..e65fc32f637a6 100644 --- a/client/executor/common/src/runtime_blob/data_segments_snapshot.rs +++ b/client/executor/common/src/runtime_blob/data_segments_snapshot.rs @@ -49,7 +49,7 @@ impl DataSegmentsSnapshot { // [op, End] if init_expr.len() != 2 { - return Err(Error::InitializerHasTooManyExpressions); + return Err(Error::InitializerHasTooManyExpressions) } let offset = match &init_expr[0] { Instruction::I32Const(v) => *v as u32, @@ -60,7 +60,7 @@ impl DataSegmentsSnapshot { // At the moment of writing the Substrate Runtime Interface does not provide // any globals. There is nothing that prevents us from supporting this // if/when we gain those. - return Err(Error::ImportedGlobalsUnsupported); + return Err(Error::ImportedGlobalsUnsupported) }, insn => return Err(Error::InvalidInitializerExpression(format!("{:?}", insn))), }; diff --git a/client/executor/common/src/runtime_blob/runtime_blob.rs b/client/executor/common/src/runtime_blob/runtime_blob.rs index 05efced2ce0e5..08df4b32d59eb 100644 --- a/client/executor/common/src/runtime_blob/runtime_blob.rs +++ b/client/executor/common/src/runtime_blob/runtime_blob.rs @@ -151,7 +151,7 @@ impl RuntimeBlob { .entries_mut() .push(ExportEntry::new(memory_name, Internal::Memory(0))); - break; + break } Ok(()) @@ -176,7 +176,7 @@ impl RuntimeBlob { .ok_or_else(|| WasmError::Other("no memory section found".into()))?; if memory_section.entries().is_empty() { - return Err(WasmError::Other("memory section is empty".into())); + return Err(WasmError::Other("memory section is empty".into())) } for memory_ty in memory_section.entries_mut() { let min = memory_ty.limits().initial().saturating_add(extra_heap_pages); @@ -190,9 +190,8 @@ impl RuntimeBlob { pub(super) fn exported_internal_global_names(&self) -> impl 
Iterator { let exports = self.raw_module.export_section().map(|es| es.entries()).unwrap_or(&[]); exports.iter().filter_map(|export| match export.internal() { - Internal::Global(_) if export.field().starts_with("exported_internal_global") => { - Some(export.field()) - }, + Internal::Global(_) if export.field().starts_with("exported_internal_global") => + Some(export.field()), _ => None, }) } diff --git a/client/executor/common/src/sandbox.rs b/client/executor/common/src/sandbox.rs index 90a6ba1e1597a..1e925bd5a7835 100644 --- a/client/executor/common/src/sandbox.rs +++ b/client/executor/common/src/sandbox.rs @@ -200,14 +200,12 @@ impl SandboxInstance { sandbox_context: &mut dyn SandboxContext, ) -> std::result::Result, error::Error> { match &self.backend_instance { - BackendInstance::Wasmi(wasmi_instance) => { - wasmi_invoke(self, wasmi_instance, export_name, args, state, sandbox_context) - }, + BackendInstance::Wasmi(wasmi_instance) => + wasmi_invoke(self, wasmi_instance, export_name, args, state, sandbox_context), #[cfg(feature = "wasmer-sandbox")] - BackendInstance::Wasmer(wasmer_instance) => { - wasmer_invoke(wasmer_instance, export_name, args, state, sandbox_context) - }, + BackendInstance::Wasmer(wasmer_instance) => + wasmer_invoke(wasmer_instance, export_name, args, state, sandbox_context), } } @@ -408,9 +406,8 @@ impl BackendContext { SandboxBackend::TryWasmer => BackendContext::Wasmi, #[cfg(feature = "wasmer-sandbox")] - SandboxBackend::Wasmer | SandboxBackend::TryWasmer => { - BackendContext::Wasmer(WasmerBackend::new()) - }, + SandboxBackend::Wasmer | SandboxBackend::TryWasmer => + BackendContext::Wasmer(WasmerBackend::new()), } } } @@ -566,9 +563,8 @@ impl Store

{ BackendContext::Wasmi => wasmi_instantiate(wasm, guest_env, state, sandbox_context)?, #[cfg(feature = "wasmer-sandbox")] - BackendContext::Wasmer(ref context) => { - wasmer_instantiate(context, wasm, guest_env, state, sandbox_context)? - }, + BackendContext::Wasmer(ref context) => + wasmer_instantiate(context, wasm, guest_env, state, sandbox_context)?, }; Ok(UnregisteredInstance { sandbox_instance }) diff --git a/client/executor/common/src/sandbox/wasmer_backend.rs b/client/executor/common/src/sandbox/wasmer_backend.rs index 0890feab65403..29926141ed8b8 100644 --- a/client/executor/common/src/sandbox/wasmer_backend.rs +++ b/client/executor/common/src/sandbox/wasmer_backend.rs @@ -85,12 +85,11 @@ pub fn invoke( wasmer::Val::I64(val) => Value::I64(val), wasmer::Val::F32(val) => Value::F32(f32::to_bits(val)), wasmer::Val::F64(val) => Value::F64(f64::to_bits(val)), - _ => { + _ => return Err(Error::Sandbox(format!( "Unsupported return value: {:?}", wasm_value, - ))) - }, + ))), }; Ok(Some(wasmer_value)) @@ -161,7 +160,7 @@ pub fn instantiate( index } else { // Missing import (should we abort here?) 
- continue; + continue }; let supervisor_func_index = guest_env @@ -190,9 +189,8 @@ pub fn instantiate( wasmer::Instance::new(&module, &import_object).map_err(|error| match error { wasmer::InstantiationError::Link(_) => InstantiationError::Instantiation, wasmer::InstantiationError::Start(_) => InstantiationError::StartTrapped, - wasmer::InstantiationError::HostEnvInitialization(_) => { - InstantiationError::EnvironmentDefinitionCorrupted - }, + wasmer::InstantiationError::HostEnvInitialization(_) => + InstantiationError::EnvironmentDefinitionCorrupted, wasmer::InstantiationError::CpuFeature(_) => InstantiationError::CpuFeature, }) })?; @@ -219,9 +217,8 @@ fn dispatch_function( wasmer::Val::I64(val) => Ok(Value::I64(*val)), wasmer::Val::F32(val) => Ok(Value::F32(f32::to_bits(*val))), wasmer::Val::F64(val) => Ok(Value::F64(f64::to_bits(*val))), - _ => { - Err(RuntimeError::new(format!("Unsupported function argument: {:?}", val))) - }, + _ => + Err(RuntimeError::new(format!("Unsupported function argument: {:?}", val))), }) .collect::, _>>()? 
.encode(); @@ -249,7 +246,7 @@ fn dispatch_function( "Failed dealloction after failed write of invoke arguments", )?; - return Err(RuntimeError::new("Can't write invoke args into memory")); + return Err(RuntimeError::new("Can't write invoke args into memory")) } // Perform the actuall call diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index c198400656017..0424ad418617b 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -451,7 +451,7 @@ where args: &[Value], ) -> Result { if args.len() != 1 { - return Err(sp_sandbox::HostError); + return Err(sp_sandbox::HostError) } let condition = args[0].as_i32().ok_or_else(|| sp_sandbox::HostError)?; if condition != 0 { @@ -465,7 +465,7 @@ where args: &[Value], ) -> Result { if args.len() != 1 { - return Err(sp_sandbox::HostError); + return Err(sp_sandbox::HostError) } let inc_by = args[0].as_i32().ok_or_else(|| sp_sandbox::HostError)?; e.counter += inc_by as u32; diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 6013619d46954..0eabffb8c87df 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -299,7 +299,7 @@ where .map_err(|e| format!("Failed to read the static section: {:?}", e)) .map(|v| v.map(|v| v.encode()))? 
{ - return Ok(version); + return Ok(version) } // If the blob didn't have embedded runtime version section, we fallback to the legacy diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 090ca99b693d4..5576fff186bb2 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -329,7 +329,7 @@ where }, }, ) - .map(|runtime| -> Arc { Arc::new(runtime) }) + .map(|runtime| -> Arc { Arc::new(runtime) }), } } diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 41341db22b994..1284cc23e4c96 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -163,7 +163,7 @@ impl Sandbox for FunctionExecutor { }; if self.memory.set(buf_ptr.into(), &buffer).is_err() { - return Ok(sandbox_env::ERR_OUT_OF_BOUNDS); + return Ok(sandbox_env::ERR_OUT_OF_BOUNDS) } Ok(sandbox_env::ERR_OK) @@ -188,7 +188,7 @@ impl Sandbox for FunctionExecutor { }; if sandboxed_memory.write_from(Pointer::new(offset as u32), &buffer).is_err() { - return Ok(sandbox_env::ERR_OUT_OF_BOUNDS); + return Ok(sandbox_env::ERR_OUT_OF_BOUNDS) } Ok(sandbox_env::ERR_OK) @@ -245,7 +245,7 @@ impl Sandbox for FunctionExecutor { // Serialize return value and write it back into the memory. 
sp_wasm_interface::ReturnValue::Value(val).using_encoded(|val| { if val.len() > return_val_len as usize { - return Err("Return value buffer is too small".into()); + return Err("Return value buffer is too small".into()) } self.write_memory(return_val, val).map_err(|_| "Return value buffer is OOB")?; Ok(sandbox_env::ERR_OK) @@ -362,14 +362,14 @@ impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { for (function_index, function) in self.host_functions.iter().enumerate() { if name == function.name() { if signature == function.signature() { - return Ok(wasmi::FuncInstance::alloc_host(signature.into(), function_index)); + return Ok(wasmi::FuncInstance::alloc_host(signature.into(), function_index)) } else { return Err(wasmi::Error::Instantiation(format!( "Invalid signature for function `{}` expected `{:?}`, got `{:?}`", function.name(), signature, function.signature(), - ))); + ))) } } } @@ -392,9 +392,8 @@ impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { ) -> Result { if field_name == "memory" { match &mut *self.import_memory.borrow_mut() { - Some(_) => { - Err(wasmi::Error::Instantiation("Memory can not be imported twice!".into())) - }, + Some(_) => + Err(wasmi::Error::Instantiation("Memory can not be imported twice!".into())), memory_ref @ None => { if memory_type .maximum() @@ -443,9 +442,9 @@ impl wasmi::Externals for FunctionExecutor { .map_err(|msg| Error::FunctionExecution(function.name().to_string(), msg)) .map_err(wasmi::Trap::from) .map(|v| v.map(Into::into)) - } else if self.allow_missing_func_imports - && index >= self.host_functions.len() - && index < self.host_functions.len() + self.missing_functions.len() + } else if self.allow_missing_func_imports && + index >= self.host_functions.len() && + index < self.host_functions.len() + self.missing_functions.len() { Err(Error::from(format!( "Function `{}` is only a stub. 
Calling a stub is not allowed.", diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index b9089d5b59b83..768a6e36e2390 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -171,7 +171,7 @@ impl<'a> Sandbox for HostContext<'a> { }; if util::write_memory_from(&mut self.caller, buf_ptr, &buffer).is_err() { - return Ok(sandbox_env::ERR_OUT_OF_BOUNDS); + return Ok(sandbox_env::ERR_OUT_OF_BOUNDS) } Ok(sandbox_env::ERR_OK) @@ -194,7 +194,7 @@ impl<'a> Sandbox for HostContext<'a> { }; if sandboxed_memory.write_from(Pointer::new(offset as u32), &buffer).is_err() { - return Ok(sandbox_env::ERR_OUT_OF_BOUNDS); + return Ok(sandbox_env::ERR_OUT_OF_BOUNDS) } Ok(sandbox_env::ERR_OK) @@ -243,7 +243,7 @@ impl<'a> Sandbox for HostContext<'a> { // Serialize return value and write it back into the memory. sp_wasm_interface::ReturnValue::Value(val.into()).using_encoded(|val| { if val.len() > return_val_len as usize { - return Err("Return value buffer is too small".into()); + return Err("Return value buffer is too small".into()) } ::write_memory(self, return_val, val) .map_err(|_| "can't write return value")?; @@ -360,13 +360,12 @@ impl<'a, 'b> sandbox::SandboxContext for SandboxContext<'a, 'b> { ); match result { - Ok(()) => { + Ok(()) => if let Some(ret_val) = ret_vals[0].i64() { Ok(ret_val) } else { Err("Supervisor function returned unexpected result!".into()) - } - }, + }, Err(err) => Err(err.to_string().into()), } } diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index c247cd24033a1..c80952a2541ce 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -41,20 +41,19 @@ where "host doesn't provide any imports from non-env module: {}:{}", import_ty.module(), name, - ))); + ))) } match import_ty.ty() { ExternType::Func(func_ty) => { pending_func_imports.insert(name.to_owned(), (import_ty, func_ty)); }, - _ => { + 
_ => return Err(WasmError::Other(format!( "host doesn't provide any non function imports: {}:{}", import_ty.module(), name, - ))) - }, + ))), }; } @@ -81,7 +80,7 @@ where return Err(WasmError::Other(format!( "runtime requires function imports which are not present on the host: {}", names - ))); + ))) } } diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index 78f12844e6fe8..feded4008068d 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -64,12 +64,10 @@ impl EntryPoint { let data_len = u32::from(data_len); match self.call_type { - EntryPointType::Direct { ref entrypoint } => { - entrypoint.call(&mut *store, (data_ptr, data_len)) - }, - EntryPointType::Wrapped { func, ref dispatcher } => { - dispatcher.call(&mut *store, (func, data_ptr, data_len)) - }, + EntryPointType::Direct { ref entrypoint } => + entrypoint.call(&mut *store, (data_ptr, data_len)), + EntryPointType::Wrapped { func, ref dispatcher } => + dispatcher.call(&mut *store, (func, data_ptr, data_len)), } .map_err(|trap| { let host_state = store @@ -339,7 +337,7 @@ impl InstanceWrapper { /// as a side-effect. pub fn decommit(&mut self) { if self.memory.data_size(&self.store) == 0 { - return; + return } cfg_if::cfg_if! { diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 174c0b0c333ef..5bca899648c34 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -244,12 +244,10 @@ impl WasmInstance for WasmtimeInstance { fn get_global_const(&mut self, name: &str) -> Result> { match &mut self.strategy { - Strategy::LegacyInstanceReuse { instance_wrapper, .. } => { - instance_wrapper.get_global_val(name) - }, - Strategy::RecreateInstance(ref mut instance_creator) => { - instance_creator.instantiate()?.get_global_val(name) - }, + Strategy::LegacyInstanceReuse { instance_wrapper, .. 
} => + instance_wrapper.get_global_val(name), + Strategy::RecreateInstance(ref mut instance_creator) => + instance_creator.instantiate()?.get_global_val(name), } } @@ -260,9 +258,8 @@ impl WasmInstance for WasmtimeInstance { // associated with it. None }, - Strategy::LegacyInstanceReuse { instance_wrapper, .. } => { - Some(instance_wrapper.base_ptr()) - }, + Strategy::LegacyInstanceReuse { instance_wrapper, .. } => + Some(instance_wrapper.base_ptr()), } } } @@ -666,12 +663,11 @@ where }), ) }, - InstantiationStrategy::Pooling - | InstantiationStrategy::PoolingCopyOnWrite - | InstantiationStrategy::RecreateInstance - | InstantiationStrategy::RecreateInstanceCopyOnWrite => { - (module, InternalInstantiationStrategy::Builtin) - }, + InstantiationStrategy::Pooling | + InstantiationStrategy::PoolingCopyOnWrite | + InstantiationStrategy::RecreateInstance | + InstantiationStrategy::RecreateInstanceCopyOnWrite => + (module, InternalInstantiationStrategy::Builtin), } }, CodeSupplyMode::Precompiled(compiled_artifact_path) => { diff --git a/client/executor/wasmtime/src/util.rs b/client/executor/wasmtime/src/util.rs index 07f9c7213ed15..83745e21e86af 100644 --- a/client/executor/wasmtime/src/util.rs +++ b/client/executor/wasmtime/src/util.rs @@ -158,8 +158,8 @@ pub(crate) fn replace_strategy_if_broken(strategy: &mut InstantiationStrategy) { // These strategies require a working `madvise` to be sound. 
InstantiationStrategy::PoolingCopyOnWrite => InstantiationStrategy::Pooling, - InstantiationStrategy::RecreateInstanceCopyOnWrite - | InstantiationStrategy::LegacyInstanceReuse => InstantiationStrategy::RecreateInstance, + InstantiationStrategy::RecreateInstanceCopyOnWrite | + InstantiationStrategy::LegacyInstanceReuse => InstantiationStrategy::RecreateInstance, }; use once_cell::sync::OnceCell; diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index b3609a747104e..0803e6b3c2931 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -183,7 +183,7 @@ where /// Get a genesis set with given authorities. pub(crate) fn genesis(initial: AuthorityList) -> Option { if Self::invalid_authority_list(&initial) { - return None; + return None } Some(AuthoritySet { @@ -204,7 +204,7 @@ where authority_set_changes: AuthoritySetChanges, ) -> Option { if Self::invalid_authority_list(&authorities) { - return None; + return None } Some(AuthoritySet { @@ -230,8 +230,8 @@ where F: Fn(&H, &H) -> Result, { let filter = |node_hash: &H, node_num: &N, _: &PendingChange| { - if number >= *node_num - && (is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash) + if number >= *node_num && + (is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash) { // Continue the search in this subtree. FilterAction::KeepNode @@ -278,7 +278,7 @@ where for change in &self.pending_forced_changes { if is_descendent_of(&change.canon_hash, best_hash)? { forced = Some((change.canon_hash.clone(), change.canon_height.clone())); - break; + break } } @@ -286,14 +286,13 @@ where for (_, _, change) in self.pending_standard_changes.roots() { if is_descendent_of(&change.canon_hash, best_hash)? 
{ standard = Some((change.canon_hash.clone(), change.canon_height.clone())); - break; + break } } let earliest = match (forced, standard) { - (Some(forced), Some(standard)) => { - Some(if forced.1 < standard.1 { forced } else { standard }) - }, + (Some(forced), Some(standard)) => + Some(if forced.1 < standard.1 { forced } else { standard }), (Some(forced), None) => Some(forced), (None, Some(standard)) => Some(standard), (None, None) => None, @@ -345,11 +344,11 @@ where { for change in &self.pending_forced_changes { if change.canon_hash == pending.canon_hash { - return Err(Error::DuplicateAuthoritySetChange); + return Err(Error::DuplicateAuthoritySetChange) } if is_descendent_of(&change.canon_hash, &pending.canon_hash)? { - return Err(Error::MultiplePendingForcedAuthoritySetChanges); + return Err(Error::MultiplePendingForcedAuthoritySetChanges) } } @@ -392,7 +391,7 @@ where E: std::error::Error, { if Self::invalid_authority_list(&pending.next_authorities) { - return Err(Error::InvalidAuthoritySet); + return Err(Error::InvalidAuthoritySet) } match pending.delay_kind { @@ -473,8 +472,8 @@ where // check if there's any pending standard change that we depend on for (_, _, standard_change) in self.pending_standard_changes.roots() { - if standard_change.effective_number() <= median_last_finalized - && is_descendent_of(&standard_change.canon_hash, &change.canon_hash)? + if standard_change.effective_number() <= median_last_finalized && + is_descendent_of(&standard_change.canon_hash, &change.canon_hash)? { log::info!(target: "afg", "Not applying authority set change forced at block #{:?}, due to pending standard change at block #{:?}", @@ -484,7 +483,7 @@ where return Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied( standard_change.effective_number(), - )); + )) } } @@ -516,7 +515,7 @@ where }, )); - break; + break } } @@ -563,8 +562,8 @@ where // we will keep all forced changes for any later blocks and that are a // descendent of the finalized block (i.e. 
they are part of this branch). for change in pending_forced_changes { - if change.effective_number() > finalized_number - && is_descendent_of(&finalized_hash, &change.canon_hash)? + if change.effective_number() > finalized_number && + is_descendent_of(&finalized_hash, &change.canon_hash)? { self.pending_forced_changes.push(change) } @@ -722,7 +721,7 @@ impl AuthoritySetChanges { .map(|last_auth_change| last_auth_change.1 < block_number) .unwrap_or(false) { - return AuthoritySetChangeId::Latest; + return AuthoritySetChangeId::Latest } let idx = self @@ -735,7 +734,7 @@ impl AuthoritySetChanges { // if this is the first index but not the first set id then we are missing data. if idx == 0 && set_id != 0 { - return AuthoritySetChangeId::Unknown; + return AuthoritySetChangeId::Unknown } AuthoritySetChangeId::Set(set_id, block_number) @@ -772,7 +771,7 @@ impl AuthoritySetChanges { // if this is the first index but not the first set id then we are missing data. if idx == 0 && set_id != 0 { - return None; + return None } } diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index 24c541261ceb2..235453ea35df1 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -210,7 +210,7 @@ where backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; - return Ok(Some((new_set, set_state))); + return Ok(Some((new_set, set_state))) } Ok(None) @@ -274,7 +274,7 @@ where backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; - return Ok(Some((set, set_state))); + return Ok(Some((set, set_state))) } Ok(None) @@ -307,7 +307,7 @@ where }, }; - return Ok(Some((new_set, set_state))); + return Ok(Some((new_set, set_state))) } Ok(None) @@ -336,7 +336,7 @@ where return Ok(PersistentData { authority_set: new_set.into(), set_state: set_state.into(), - }); + }) } }, Some(1) => { @@ -346,7 +346,7 @@ where return Ok(PersistentData { authority_set: new_set.into(), 
set_state: set_state.into(), - }); + }) } }, Some(2) => { @@ -356,7 +356,7 @@ where return Ok(PersistentData { authority_set: new_set.into(), set_state: set_state.into(), - }); + }) } }, Some(3) => { @@ -376,18 +376,11 @@ where }, }; - return Ok(PersistentData { - authority_set: set.into(), - set_state: set_state.into(), - }); + return Ok(PersistentData { authority_set: set.into(), set_state: set_state.into() }) } }, - Some(other) => { - return Err(ClientError::Backend(format!( - "Unsupported GRANDPA DB version: {:?}", - other - ))) - }, + Some(other) => + return Err(ClientError::Backend(format!("Unsupported GRANDPA DB version: {:?}", other))), } // genesis. diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 3c304b9ceadad..ce85ca842aa52 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -166,18 +166,18 @@ impl View { fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { // only from current set if set_id < self.set_id { - return Consider::RejectPast; + return Consider::RejectPast } if set_id > self.set_id { - return Consider::RejectFuture; + return Consider::RejectFuture } // only r-1 ... r+1 if round.0 > self.round.0.saturating_add(1) { - return Consider::RejectFuture; + return Consider::RejectFuture } if round.0 < self.round.0.saturating_sub(1) { - return Consider::RejectPast; + return Consider::RejectPast } Consider::Accept @@ -188,23 +188,22 @@ impl View { fn consider_global(&self, set_id: SetId, number: N) -> Consider { // only from current set if set_id < self.set_id { - return Consider::RejectPast; + return Consider::RejectPast } if set_id > self.set_id { - return Consider::RejectFuture; + return Consider::RejectFuture } // only commits which claim to prove a higher block number than // the one we're aware of. 
match self.last_commit { None => Consider::Accept, - Some(ref num) => { + Some(ref num) => if num < &number { Consider::Accept } else { Consider::RejectPast - } - }, + }, } } } @@ -552,22 +551,22 @@ impl Peers { Some(p) => p, }; - let invalid_change = peer.view.set_id > update.set_id - || peer.view.round > update.round && peer.view.set_id == update.set_id - || peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height); + let invalid_change = peer.view.set_id > update.set_id || + peer.view.round > update.round && peer.view.set_id == update.set_id || + peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height); if invalid_change { - return Err(Misbehavior::InvalidViewChange); + return Err(Misbehavior::InvalidViewChange) } let now = Instant::now(); - let duplicate_packet = (update.set_id, update.round, Some(&update.commit_finalized_height)) - == (peer.view.set_id, peer.view.round, peer.view.last_commit.as_ref()); + let duplicate_packet = (update.set_id, update.round, Some(&update.commit_finalized_height)) == + (peer.view.set_id, peer.view.round, peer.view.last_commit.as_ref()); if duplicate_packet { if let Some(last_update) = peer.view.last_update { if now < last_update + self.neighbor_rebroadcast_period / 2 { - return Err(Misbehavior::DuplicateNeighborMessage); + return Err(Misbehavior::DuplicateNeighborMessage) } } } @@ -595,7 +594,7 @@ impl Peers { // same height, because there is still a misbehavior condition based on // sending commits that are <= the best we are aware of. 
if peer.view.last_commit.as_ref() > Some(&new_height) { - return Err(Misbehavior::InvalidViewChange); + return Err(Misbehavior::InvalidViewChange) } peer.view.last_commit = Some(new_height); @@ -645,7 +644,7 @@ impl Peers { } else if n_authorities_added < one_and_a_half_lucky { second_stage_peers.insert(*peer_id); } else { - break; + break } } @@ -654,7 +653,7 @@ impl Peers { let n_second_stage_peers = LUCKY_PEERS.max((shuffled_peers.len() as f32).sqrt() as usize); for (peer_id, info) in &shuffled_peers { if info.roles.is_light() { - continue; + continue } if first_stage_peers.len() < LUCKY_PEERS { @@ -665,7 +664,7 @@ impl Peers { second_stage_peers.insert(*peer_id); } } else { - break; + break } } @@ -794,7 +793,7 @@ impl Inner { if v.round == round { // Do not send neighbor packets out if `round` has not changed --- // such behavior is punishable. - return None; + return None } else { v } @@ -822,8 +821,8 @@ impl Inner { ref mut x @ None => x.get_or_insert(LocalView::new(set_id, Round(1))), Some(ref mut v) => { if v.set_id == set_id { - let diff_authorities = self.authorities.iter().collect::>() - != authorities.iter().collect::>(); + let diff_authorities = self.authorities.iter().collect::>() != + authorities.iter().collect::>(); if diff_authorities { debug!(target: "afg", @@ -835,7 +834,7 @@ impl Inner { } // Do not send neighbor packets out if the `set_id` has not changed --- // such behavior is punishable. 
- return None; + return None } else { v } @@ -861,13 +860,12 @@ impl Inner { { match self.local_view { None => return None, - Some(ref mut v) => { + Some(ref mut v) => if v.last_commit_height() < Some(&finalized) { v.last_commit = Some((finalized, round, set_id)); } else { - return None; - } - }, + return None + }, }; } @@ -907,12 +905,10 @@ impl Inner { ) -> Action { match self.consider_vote(full.round, full.set_id) { Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), - Consider::RejectOutOfScope => { - return Action::Discard(Misbehavior::OutOfScopeMessage.cost()) - }, - Consider::RejectPast => { - return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)) - }, + Consider::RejectOutOfScope => + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), + Consider::RejectPast => + return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)), Consider::Accept => {}, } @@ -925,7 +921,7 @@ impl Inner { "afg.bad_msg_signature"; "signature" => ?full.message.id, ); - return Action::Discard(cost::UNKNOWN_VOTER); + return Action::Discard(cost::UNKNOWN_VOTER) } if !sp_finality_grandpa::check_message_signature( @@ -942,7 +938,7 @@ impl Inner { "afg.bad_msg_signature"; "signature" => ?full.message.id, ); - return Action::Discard(cost::BAD_SIGNATURE); + return Action::Discard(cost::BAD_SIGNATURE) } let topic = super::round_topic::(full.round.0, full.set_id.0); @@ -955,22 +951,20 @@ impl Inner { full: &FullCommitMessage, ) -> Action { if let Err(misbehavior) = self.peers.update_commit_height(who, full.message.target_number) { - return Action::Discard(misbehavior.cost()); + return Action::Discard(misbehavior.cost()) } match self.consider_global(full.set_id, full.message.target_number) { Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), - Consider::RejectPast => { - return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)) - }, - 
Consider::RejectOutOfScope => { - return Action::Discard(Misbehavior::OutOfScopeMessage.cost()) - }, + Consider::RejectPast => + return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)), + Consider::RejectOutOfScope => + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), Consider::Accept => {}, } - if full.message.precommits.len() != full.message.auth_data.len() - || full.message.precommits.is_empty() + if full.message.precommits.len() != full.message.auth_data.len() || + full.message.precommits.is_empty() { debug!(target: "afg", "Malformed compact commit"); telemetry!( @@ -981,7 +975,7 @@ impl Inner { "auth_data_len" => ?full.message.auth_data.len(), "precommits_is_empty" => ?full.message.precommits.is_empty(), ); - return Action::Discard(cost::MALFORMED_COMMIT); + return Action::Discard(cost::MALFORMED_COMMIT) } // always discard commits initially and rebroadcast after doing full @@ -998,19 +992,19 @@ impl Inner { match &self.pending_catch_up { PendingCatchUp::Requesting { who: peer, request, instant } => { if peer != who { - return Action::Discard(Misbehavior::OutOfScopeMessage.cost()); + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()) } if request.set_id != full.set_id { - return Action::Discard(cost::MALFORMED_CATCH_UP); + return Action::Discard(cost::MALFORMED_CATCH_UP) } if request.round.0 > full.message.round_number { - return Action::Discard(cost::MALFORMED_CATCH_UP); + return Action::Discard(cost::MALFORMED_CATCH_UP) } if full.message.prevotes.is_empty() || full.message.precommits.is_empty() { - return Action::Discard(cost::MALFORMED_CATCH_UP); + return Action::Discard(cost::MALFORMED_CATCH_UP) } // move request to pending processing state, we won't push out @@ -1054,26 +1048,25 @@ impl Inner { // race where the peer sent us the request before it observed that // we had transitioned to a new set. In this case we charge a lower // cost. 
- if request.set_id.0.saturating_add(1) == local_view.set_id.0 - && local_view.round.0.saturating_sub(CATCH_UP_THRESHOLD) == 0 + if request.set_id.0.saturating_add(1) == local_view.set_id.0 && + local_view.round.0.saturating_sub(CATCH_UP_THRESHOLD) == 0 { - return (None, Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP)); + return (None, Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP)) } - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())); + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) } match self.peers.peer(who) { None => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), - Some(peer) if peer.view.round >= request.round => { - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) - }, + Some(peer) if peer.view.round >= request.round => + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), _ => {}, } let last_completed_round = set_state.read().last_completed_round(); if last_completed_round.number < request.round.0 { - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())); + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) } trace!(target: "afg", "Replying to catch-up request for round {} from {} with round {}", @@ -1138,9 +1131,9 @@ impl Inner { // won't be able to reply since they don't follow the full GRANDPA // protocol and therefore might not have the vote data available. 
if let (Some(peer), Some(local_view)) = (self.peers.peer(who), &self.local_view) { - if self.catch_up_config.request_allowed(peer) - && peer.view.set_id == local_view.set_id - && peer.view.round.0.saturating_sub(CATCH_UP_THRESHOLD) > local_view.round.0 + if self.catch_up_config.request_allowed(peer) && + peer.view.set_id == local_view.set_id && + peer.view.round.0.saturating_sub(CATCH_UP_THRESHOLD) > local_view.round.0 { // send catch up request if allowed let round = peer.view.round.0 - 1; // peer.view.round is > 0 @@ -1173,9 +1166,8 @@ impl Inner { let update_res = self.peers.update_peer_state(who, update); let (cost_benefit, topics) = match update_res { - Ok(view) => { - (benefit::NEIGHBOR_MESSAGE, view.map(|view| neighbor_topics::(view))) - }, + Ok(view) => + (benefit::NEIGHBOR_MESSAGE, view.map(|view| neighbor_topics::(view))), Err(misbehavior) => (misbehavior.cost(), None), }; @@ -1228,7 +1220,7 @@ impl Inner { let report = match &self.pending_catch_up { PendingCatchUp::Requesting { who: peer, instant, .. } => { if instant.elapsed() <= CATCH_UP_REQUEST_TIMEOUT { - return (false, None); + return (false, None) } else { // report peer for timeout Some((*peer, cost::CATCH_UP_REQUEST_TIMEOUT)) @@ -1236,7 +1228,7 @@ impl Inner { }, PendingCatchUp::Processing { instant, .. 
} => { if instant.elapsed() < CATCH_UP_PROCESS_TIMEOUT { - return (false, None); + return (false, None) } else { None } @@ -1273,8 +1265,8 @@ impl Inner { if round_elapsed < round_duration.mul_f32(PROPAGATION_SOME) { self.peers.first_stage_peers.contains(who) } else if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { - self.peers.first_stage_peers.contains(who) - || self.peers.second_stage_peers.contains(who) + self.peers.first_stage_peers.contains(who) || + self.peers.second_stage_peers.contains(who) } else { self.peers.peer(who).map(|info| !info.roles.is_light()).unwrap_or(false) } @@ -1302,9 +1294,9 @@ impl Inner { }; if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { - self.peers.first_stage_peers.contains(who) - || self.peers.second_stage_peers.contains(who) - || self.peers.lucky_light_peers.contains(who) + self.peers.first_stage_peers.contains(who) || + self.peers.second_stage_peers.contains(who) || + self.peers.lucky_light_peers.contains(who) } else { true } @@ -1592,7 +1584,7 @@ impl sc_network_gossip::Validator for GossipValidator sc_network_gossip::Validator for GossipValidator sc_network_gossip::Validator for GossipValidator false, Ok(GossipMessage::CatchUpRequest(_)) => false, @@ -1673,10 +1665,8 @@ impl sc_network_gossip::Validator for GossipValidator // we expire any commit message that doesn't target the same block // as our best commit or isn't from the same round and set id - { - !(full.message.target_number == number - && full.round == round && full.set_id == set_id) - }, + !(full.message.target_number == number && + full.round == round && full.set_id == set_id), None => true, }, Ok(_) => true, @@ -2352,8 +2342,8 @@ mod tests { let test = |rounds_elapsed, peers| { // rewind n round durations - val.inner.write().local_view.as_mut().unwrap().round_start = Instant::now() - - Duration::from_millis( + val.inner.write().local_view.as_mut().unwrap().round_start = Instant::now() - + Duration::from_millis( (round_duration.as_millis() as 
f32 * rounds_elapsed) as u64, ); diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index ba1bdf21bcc05..75a7697812c6c 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -347,7 +347,7 @@ impl> NetworkBridge { // check signature. if !voters.contains(&msg.message.id) { debug!(target: "afg", "Skipping message from unknown voter {}", msg.message.id); - return future::ready(None); + return future::ready(None) } if voters.len().get() <= TELEMETRY_VOTERS_LIMIT { @@ -481,11 +481,10 @@ impl> Future for NetworkBridge { Poll::Ready(Some((to, packet))) => { self.gossip_engine.lock().send_message(to, packet.encode()); }, - Poll::Ready(None) => { + Poll::Ready(None) => return Poll::Ready(Err(Error::Network( "Neighbor packet worker stream closed.".into(), - ))) - }, + ))), Poll::Pending => break, } } @@ -495,19 +494,17 @@ impl> Future for NetworkBridge { Poll::Ready(Some(PeerReport { who, cost_benefit })) => { self.gossip_engine.lock().report(who, cost_benefit); }, - Poll::Ready(None) => { + Poll::Ready(None) => return Poll::Ready(Err(Error::Network( "Gossip validator report stream closed.".into(), - ))) - }, + ))), Poll::Pending => break, } } match self.gossip_engine.lock().poll_unpin(cx) { - Poll::Ready(()) => { - return Poll::Ready(Err(Error::Network("Gossip engine future finished.".into()))) - }, + Poll::Ready(()) => + return Poll::Ready(Err(Error::Network("Gossip engine future finished.".into()))), Poll::Pending => {}, } @@ -555,7 +552,7 @@ fn incoming_global( gossip_engine.lock().report(who, cost); } - return None; + return None } let round = msg.round; @@ -607,7 +604,7 @@ fn incoming_global( gossip_engine.lock().report(who, cost); } - return None; + return None } let cb = move |outcome| { @@ -640,12 +637,10 @@ fn incoming_global( }) .filter_map(move |(notification, msg)| { future::ready(match msg { - GossipMessage::Commit(msg) => { - 
process_commit(msg, notification, &gossip_engine, &gossip_validator, &voters) - }, - GossipMessage::CatchUp(msg) => { - process_catch_up(msg, notification, &gossip_engine, &gossip_validator, &voters) - }, + GossipMessage::Commit(msg) => + process_commit(msg, notification, &gossip_engine, &gossip_validator, &voters), + GossipMessage::CatchUp(msg) => + process_catch_up(msg, notification, &gossip_engine, &gossip_validator, &voters), _ => { debug!(target: "afg", "Skipping unknown message type"); None @@ -777,7 +772,7 @@ impl Sink> for OutgoingMessages { // forward the message to the inner sender. return self.sender.start_send(signed).map_err(|e| { Error::Network(format!("Failed to start_send on channel sender: {:?}", e)) - }); + }) }; Ok(()) @@ -815,16 +810,16 @@ fn check_compact_commit( if let Some(weight) = voters.get(id).map(|info| info.weight()) { total_weight += weight.get(); if total_weight > full_threshold { - return Err(cost::MALFORMED_COMMIT); + return Err(cost::MALFORMED_COMMIT) } } else { debug!(target: "afg", "Skipping commit containing unknown voter {}", id); - return Err(cost::MALFORMED_COMMIT); + return Err(cost::MALFORMED_COMMIT) } } if total_weight < voters.threshold().get() { - return Err(cost::MALFORMED_COMMIT); + return Err(cost::MALFORMED_COMMIT) } // check signatures on all contained precommits. 
@@ -857,7 +852,7 @@ fn check_compact_commit( } .cost(); - return Err(cost); + return Err(cost) } } @@ -888,16 +883,16 @@ fn check_catch_up( if let Some(weight) = voters.get(id).map(|info| info.weight()) { total_weight += weight.get(); if total_weight > full_threshold { - return Err(cost::MALFORMED_CATCH_UP); + return Err(cost::MALFORMED_CATCH_UP) } } else { debug!(target: "afg", "Skipping catch up message containing unknown voter {}", id); - return Err(cost::MALFORMED_CATCH_UP); + return Err(cost::MALFORMED_CATCH_UP) } } if total_weight < voters.threshold().get() { - return Err(cost::MALFORMED_CATCH_UP); + return Err(cost::MALFORMED_CATCH_UP) } Ok(()) @@ -940,7 +935,7 @@ fn check_catch_up( } .cost(); - return Err(cost); + return Err(cost) } } @@ -1023,7 +1018,7 @@ impl Sink<(RoundNumber, Commit)> for CommitsOut), ) -> Result<(), Self::Error> { if !self.is_voter { - return Ok(()); + return Ok(()) } let (round, commit) = input; diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index 31b7c23377deb..c001796b5ca5d 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ b/client/finality-grandpa/src/communication/periodic.rs @@ -89,7 +89,7 @@ impl Stream for NeighborPacketWorker { this.delay.reset(this.rebroadcast_period); this.last = Some((to.clone(), packet.clone())); - return Poll::Ready(Some((to, GossipMessage::::from(packet)))); + return Poll::Ready(Some((to, GossipMessage::::from(packet)))) }, // Don't return yet, maybe the timer fired. 
Poll::Pending => {}, @@ -108,7 +108,7 @@ impl Stream for NeighborPacketWorker { while let Poll::Ready(()) = this.delay.poll_unpin(cx) {} if let Some((ref to, ref packet)) = this.last { - return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))); + return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))) } Poll::Pending diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 1909c96c84781..eab7bb2df50cf 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -201,11 +201,10 @@ impl Tester { futures::future::poll_fn(move |cx| loop { match Stream::poll_next(Pin::new(&mut s.as_mut().unwrap().events), cx) { Poll::Ready(None) => panic!("concluded early"), - Poll::Ready(Some(item)) => { + Poll::Ready(Some(item)) => if pred(item) { - return Poll::Ready(s.take().unwrap()); - } - }, + return Poll::Ready(s.take().unwrap()) + }, Poll::Pending => return Poll::Pending, } }) @@ -547,9 +546,8 @@ fn bad_commit_leads_to_report() { let fut = future::join(send_message, handle_commit) .then(move |(tester, ())| { tester.filter_network_events(move |event| match event { - Event::Report(who, cost_benefit) => { - who == id && cost_benefit == super::cost::INVALID_COMMIT - }, + Event::Report(who, cost_benefit) => + who == id && cost_benefit == super::cost::INVALID_COMMIT, _ => false, }) }) diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 40d3b3efb0b61..f235c3a86c04e 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -281,8 +281,8 @@ impl HasVoted
{ pub fn propose(&self) -> Option<&PrimaryPropose
> { match self { HasVoted::Yes(_, Vote::Propose(propose)) => Some(propose), - HasVoted::Yes(_, Vote::Prevote(propose, _)) - | HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => propose.as_ref(), + HasVoted::Yes(_, Vote::Prevote(propose, _)) | + HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => propose.as_ref(), _ => None, } } @@ -290,8 +290,8 @@ impl HasVoted
{ /// Returns the prevote we should vote with (if any.) pub fn prevote(&self) -> Option<&Prevote
> { match self { - HasVoted::Yes(_, Vote::Prevote(_, prevote)) - | HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => Some(prevote), + HasVoted::Yes(_, Vote::Prevote(_, prevote)) | + HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => Some(prevote), _ => None, } } @@ -494,7 +494,7 @@ where if *equivocation.offender() == local_id { return Err(Error::Safety( "Refraining from sending equivocation report for our own equivocation.".into(), - )); + )) } } @@ -517,9 +517,8 @@ where // find the hash of the latest block in the current set let current_set_latest_hash = match next_change { - Some((_, n)) if n.is_zero() => { - return Err(Error::Safety("Authority set change signalled at genesis.".to_string())) - }, + Some((_, n)) if n.is_zero() => + return Err(Error::Safety("Authority set change signalled at genesis.".to_string())), // the next set starts at `n` so the current one lasts until `n - 1`. if // `n` is later than the best block, then the current set is still live // at best block. @@ -553,7 +552,7 @@ where Some(proof) => proof, None => { debug!(target: "afg", "Equivocation offender is not part of the authority set."); - return Ok(()); + return Ok(()) }, }; @@ -602,7 +601,7 @@ where Client: HeaderMetadata, { if base == block { - return Err(GrandpaError::NotDescendent); + return Err(GrandpaError::NotDescendent) } let tree_route_res = sp_blockchain::tree_route(&**client, block, base); @@ -613,12 +612,12 @@ where debug!(target: "afg", "Encountered error computing ancestry between block {:?} and base {:?}: {}", block, base, e); - return Err(GrandpaError::NotDescendent); + return Err(GrandpaError::NotDescendent) }, }; if tree_route.common_block().hash != base { - return Err(GrandpaError::NotDescendent); + return Err(GrandpaError::NotDescendent) } // skip one because our ancestry is meant to start from the parent of `block`, @@ -689,7 +688,7 @@ where // before activating the new set. the `authority_set` is updated immediately thus // we restrict the voter based on that. 
if set_id != authority_set.set_id() { - return Ok(None); + return Ok(None) } best_chain_containing(block, client, authority_set, select_chain, voting_rule) @@ -708,13 +707,12 @@ where let local_id = local_authority_id(&self.voters, self.config.keystore.as_ref()); let has_voted = match self.voter_set_state.has_voted(round) { - HasVoted::Yes(id, vote) => { + HasVoted::Yes(id, vote) => if local_id.as_ref().map(|k| k == &id).unwrap_or(false) { HasVoted::Yes(id, vote) } else { HasVoted::No - } - }, + }, HasVoted::No => HasVoted::No, }; @@ -790,7 +788,7 @@ where // we've already proposed in this round (in a previous run), // ignore the given vote and don't update the voter set // state - return Ok(None); + return Ok(None) } let mut current_rounds = current_rounds.clone(); @@ -848,7 +846,7 @@ where // we've already prevoted in this round (in a previous run), // ignore the given vote and don't update the voter set // state - return Ok(None); + return Ok(None) } // report to telemetry and prometheus @@ -911,7 +909,7 @@ where // we've already precommitted in this round (in a previous run), // ignore the given vote and don't update the voter set // state - return Ok(None); + return Ok(None) } // report to telemetry and prometheus @@ -922,7 +920,7 @@ where HasVoted::Yes(_, Vote::Prevote(_, prevote)) => prevote, _ => { let msg = "Voter precommitting before prevoting."; - return Err(Error::Safety(msg.to_string())); + return Err(Error::Safety(msg.to_string())) }, }; @@ -974,7 +972,7 @@ where (completed_rounds, current_rounds) } else { let msg = "Voter acting while in paused state."; - return Err(Error::Safety(msg.to_string())); + return Err(Error::Safety(msg.to_string())) }; let mut completed_rounds = completed_rounds.clone(); @@ -1034,7 +1032,7 @@ where (completed_rounds, current_rounds) } else { let msg = "Voter acting while in paused state."; - return Err(Error::Safety(msg.to_string())); + return Err(Error::Safety(msg.to_string())) }; let mut completed_rounds = 
completed_rounds.clone(); @@ -1165,7 +1163,7 @@ where block, ); - return Ok(None); + return Ok(None) }, }; @@ -1199,7 +1197,7 @@ where } if *target_header.number() == target_number { - break; + break } target_header = client @@ -1230,8 +1228,8 @@ where .await .filter(|(_, restricted_number)| { // we can only restrict votes within the interval [base, target] - restricted_number >= base_header.number() - && restricted_number < target_header.number() + restricted_number >= base_header.number() && + restricted_number < target_header.number() }) .or_else(|| Some((target_header.hash(), *target_header.number()))) }, @@ -1281,7 +1279,7 @@ where status.finalized_number, ); - return Ok(()); + return Ok(()) } // FIXME #1483: clone only when changed @@ -1329,10 +1327,10 @@ where if !justification_required { if let Some(justification_period) = justification_period { let last_finalized_number = client.info().finalized_number; - justification_required = (!last_finalized_number.is_zero() - || number - last_finalized_number == justification_period) - && (last_finalized_number / justification_period - != number / justification_period); + justification_required = (!last_finalized_number.is_zero() || + number - last_finalized_number == justification_period) && + (last_finalized_number / justification_period != + number / justification_period); } } @@ -1416,7 +1414,7 @@ where warn!(target: "afg", "Failed to write updated authority set to disk. 
Bailing."); warn!(target: "afg", "Node is in a potentially inconsistent state."); - return Err(e.into()); + return Err(e.into()) } } diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index fc4799aa28856..453b41bc63468 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -111,7 +111,7 @@ where { changes } else { - return Ok(None); + return Ok(None) }; prove_finality(&*self.backend, authority_set_changes, block) @@ -164,7 +164,7 @@ where block, info.finalized_number, ); trace!(target: "afg", "{}", &err); - return Err(FinalityProofError::BlockNotYetFinalized); + return Err(FinalityProofError::BlockNotYetFinalized) } let (justification, just_block) = match authority_set_changes.get_set_id(block) { @@ -179,7 +179,7 @@ where "No justification found for the latest finalized block. \ Returning empty proof.", ); - return Ok(None); + return Ok(None) } }, AuthoritySetChangeId::Set(_, last_block_for_set) => { @@ -199,7 +199,7 @@ where Returning empty proof.", block, ); - return Ok(None); + return Ok(None) }; (justification, last_block_for_set) }, @@ -210,7 +210,7 @@ where You need to resync to populate AuthoritySetChanges properly.", block, ); - return Err(FinalityProofError::BlockNotInAuthoritySetChanges); + return Err(FinalityProofError::BlockNotInAuthoritySetChanges) }, }; @@ -220,7 +220,7 @@ where let mut current = block + One::one(); loop { if current > just_block || headers.len() >= MAX_UNKNOWN_HEADERS { - break; + break } headers.push(backend.blockchain().expect_header(BlockId::Number(current))?); current += One::one(); diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 17cd860b01082..3715287eea31f 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -105,9 +105,9 @@ where self.authority_set.inner().pending_changes().cloned().collect(); for pending_change in 
pending_changes { - if pending_change.delay_kind == DelayKind::Finalized - && pending_change.effective_number() > chain_info.finalized_number - && pending_change.effective_number() <= chain_info.best_number + if pending_change.delay_kind == DelayKind::Finalized && + pending_change.effective_number() > chain_info.finalized_number && + pending_change.effective_number() <= chain_info.best_number { let effective_block_hash = if !pending_change.delay.is_zero() { self.select_chain @@ -243,7 +243,7 @@ where ) -> Option>> { // check for forced authority set hard forks if let Some(change) = self.authority_set_hard_forks.get(&hash) { - return Some(change.clone()); + return Some(change.clone()) } // check for forced change. @@ -254,7 +254,7 @@ where canon_height: *header.number(), canon_hash: hash, delay_kind: DelayKind::Best { median_last_finalized }, - }); + }) } // check normal scheduled change. @@ -445,7 +445,7 @@ where self.inner.storage(hash, &sc_client_api::StorageKey(k.to_vec())) { if let Ok(id) = SetId::decode(&mut id.0.as_ref()) { - return Ok(id); + return Ok(id) } } } @@ -539,14 +539,14 @@ where Ok(BlockStatus::InChain) => { // Strip justifications when re-importing an existing block. let _justifications = block.justifications.take(); - return (&*self.inner).import_block(block, new_cache).await; + return (&*self.inner).import_block(block, new_cache).await }, Ok(BlockStatus::Unknown) => {}, Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), } if block.with_state() { - return self.import_state(block, new_cache).await; + return self.import_state(block, new_cache).await } if number <= self.inner.info().finalized_number { @@ -557,7 +557,7 @@ where "Justification required when importing \ an old block with authority set change." 
.into(), - )); + )) } assert!(block.justifications.is_some()); let mut authority_set = self.authority_set.inner_locked(); @@ -572,7 +572,7 @@ where }, ); } - return (&*self.inner).import_block(block, new_cache).await; + return (&*self.inner).import_block(block, new_cache).await } // on initial sync we will restrict logging under info to avoid spam. @@ -594,7 +594,7 @@ where r, ); pending_changes.revert(); - return Ok(r); + return Ok(r) }, Err(e) => { debug!( @@ -603,7 +603,7 @@ where e, ); pending_changes.revert(); - return Err(ConsensusError::ClientImport(e.to_string())); + return Err(ConsensusError::ClientImport(e.to_string())) }, } }; @@ -675,7 +675,7 @@ where } }); }, - None => { + None => if needs_justification { debug!( target: "afg", @@ -684,8 +684,7 @@ where ); imported_aux.needs_justification = true; - } - }, + }, } Ok(ImportResult::Imported(imported_aux)) @@ -775,7 +774,7 @@ where // justification import pipeline similar to what we do for `BlockImport`. In the // meantime we'll just drop the justification, since this is only used for BEEFY which // is still WIP. 
- return Ok(()); + return Ok(()) } let justification = GrandpaJustification::decode_and_verify_finalizes( @@ -815,7 +814,7 @@ where // send the command to the voter let _ = self.send_voter_commands.unbounded_send(command); }, - Err(CommandOrError::Error(e)) => { + Err(CommandOrError::Error(e)) => return Err(match e { Error::Grandpa(error) => ConsensusError::ClientImport(error.to_string()), Error::Network(error) => ConsensusError::ClientImport(error), @@ -825,8 +824,7 @@ where Error::Signing(error) => ConsensusError::ClientImport(error), Error::Timer(error) => ConsensusError::ClientImport(error.to_string()), Error::RuntimeApi(error) => ConsensusError::ClientImport(error.to_string()), - }) - }, + }), Ok(_) => { assert!( !enacts_change, diff --git a/client/finality-grandpa/src/justification.rs b/client/finality-grandpa/src/justification.rs index 41ae4bf1aa7e8..56b26c964ce9b 100644 --- a/client/finality-grandpa/src/justification.rs +++ b/client/finality-grandpa/src/justification.rs @@ -101,7 +101,7 @@ impl GrandpaJustification { let mut current_hash = signed.precommit.target_hash; loop { if current_hash == base_hash { - break; + break } match client.header(BlockId::Hash(current_hash))? { @@ -110,7 +110,7 @@ impl GrandpaJustification { // as base and only traverse backwards from the other blocks // in the commit. but better be safe to avoid an unbound loop. 
if *current_header.number() <= base_number { - return error(); + return error() } let parent_hash = *current_header.parent_hash(); @@ -183,7 +183,7 @@ impl GrandpaJustification { Ok(ref result) if result.is_valid() => {}, _ => { let msg = "invalid commit in grandpa justification".to_string(); - return Err(ClientError::BadJustification(msg)); + return Err(ClientError::BadJustification(msg)) }, } @@ -218,11 +218,11 @@ impl GrandpaJustification { ) { return Err(ClientError::BadJustification( "invalid signature for precommit in grandpa justification".to_string(), - )); + )) } if base_hash == signed.precommit.target_hash { - continue; + continue } match ancestry_chain.ancestry(base_hash, signed.precommit.target_hash) { @@ -234,11 +234,10 @@ impl GrandpaJustification { visited_hashes.insert(hash); } }, - _ => { + _ => return Err(ClientError::BadJustification( "invalid precommit ancestry proof in grandpa justification".to_string(), - )) - }, + )), } } @@ -253,7 +252,7 @@ impl GrandpaJustification { return Err(ClientError::BadJustification( "invalid precommit ancestries in grandpa justification with unused headers" .to_string(), - )); + )) } Ok(()) @@ -294,7 +293,7 @@ where let mut current_hash = block; loop { if current_hash == base { - break; + break } match self.ancestry.get(¤t_hash) { Some(current_header) => { diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 843d09cbd562e..a7326d57c2bf0 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -1082,11 +1082,11 @@ where // voters don't conclude naturally return Poll::Ready(Err(Error::Safety( "finality-grandpa inner voter has concluded.".into(), - ))); + ))) }, Poll::Ready(Err(CommandOrError::Error(e))) => { // return inner observer error - return Poll::Ready(Err(e)); + return Poll::Ready(Err(e)) }, Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { // some command issued internally @@ -1099,7 +1099,7 @@ where Poll::Pending => {}, 
Poll::Ready(None) => { // the `voter_commands_rx` stream should never conclude since it's never closed. - return Poll::Ready(Err(Error::Safety("`voter_commands_rx` was closed.".into()))); + return Poll::Ready(Err(Error::Safety("`voter_commands_rx` was closed.".into()))) }, Poll::Ready(Some(command)) => { // some command issued externally @@ -1142,7 +1142,7 @@ where let revertible = blocks.min(best_number - finalized); if revertible == Zero::zero() { - return Ok(()); + return Ok(()) } let number = best_number - revertible; diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index a6be7bfa67d75..9bcb03c0555c2 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -97,14 +97,14 @@ where }, voter::CommunicationIn::CatchUp(..) => { // ignore catch up messages - return future::ok(last_finalized_number); + return future::ok(last_finalized_number) }, }; // if the commit we've received targets a block lower or equal to the last // finalized, ignore it and continue with the current state if commit.target_number <= last_finalized_number { - return future::ok(last_finalized_number); + return future::ok(last_finalized_number) } let validation_result = match finality_grandpa::validate_commit( @@ -363,11 +363,11 @@ where Poll::Ready(Ok(())) => { // observer commit stream doesn't conclude naturally; this could reasonably be an // error. - return Poll::Ready(Ok(())); + return Poll::Ready(Ok(())) }, Poll::Ready(Err(CommandOrError::Error(e))) => { // return inner observer error - return Poll::Ready(Err(e)); + return Poll::Ready(Err(e)) }, Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { // some command issued internally @@ -380,7 +380,7 @@ where Poll::Pending => {}, Poll::Ready(None) => { // the `voter_commands_rx` stream should never conclude since it's never closed. 
- return Poll::Ready(Ok(())); + return Poll::Ready(Ok(())) }, Poll::Ready(Some(command)) => { // some command issued externally diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 9ac18df64608d..93d20110ff5af 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1118,8 +1118,8 @@ fn voter_persists_its_votes() { Pin::new(&mut *round_tx.lock()) .start_send(finality_grandpa::Message::Prevote(prevote)) .unwrap(); - } else if state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap() - == 1 + } else if state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap() == + 1 { // the next message we receive should be our own prevote let prevote = match signed.message { @@ -1133,8 +1133,8 @@ fn voter_persists_its_votes() { // after alice restarts it should send its previous prevote // therefore we won't ever receive it again since it will be a // known message on the gossip layer - } else if state.compare_exchange(2, 3, Ordering::SeqCst, Ordering::SeqCst).unwrap() - == 2 + } else if state.compare_exchange(2, 3, Ordering::SeqCst, Ordering::SeqCst).unwrap() == + 2 { // we then receive a precommit from alice for block 15 // even though we casted a prevote for block 30 diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index 27b6229a5afd5..df0b63348e94b 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -333,7 +333,7 @@ where if let Some(metrics) = &mut this.metrics { metrics.waiting_messages_dec(); } - return Poll::Ready(Some(Ok(ready))); + return Poll::Ready(Some(Ok(ready))) } if this.import_notifications.is_done() && this.incoming_messages.is_done() { @@ -366,9 +366,9 @@ impl BlockUntilImported for SignedMessage { if let Some(number) = status_check.block_number(target_hash)? 
{ if number != target_number { warn_authority_wrong_target(target_hash, msg.id); - return Ok(DiscardWaitOrReady::Discard); + return Ok(DiscardWaitOrReady::Discard) } else { - return Ok(DiscardWaitOrReady::Ready(msg)); + return Ok(DiscardWaitOrReady::Ready(msg)) } } @@ -459,7 +459,7 @@ impl BlockUntilImported for BlockGlobalMessage { // invalid global message: messages targeting wrong number // or at least different from other vote in same global // message. - return Ok(false); + return Ok(false) } Ok(true) @@ -473,7 +473,7 @@ impl BlockUntilImported for BlockGlobalMessage { for (target_number, target_hash) in precommit_targets { if !query_known(target_hash, target_number)? { - return Ok(DiscardWaitOrReady::Discard); + return Ok(DiscardWaitOrReady::Discard) } } }, @@ -493,7 +493,7 @@ impl BlockUntilImported for BlockGlobalMessage { for (target_number, target_hash) in targets { if !query_known(target_hash, target_number)? { - return Ok(DiscardWaitOrReady::Discard); + return Ok(DiscardWaitOrReady::Discard) } } }, @@ -511,7 +511,7 @@ impl BlockUntilImported for BlockGlobalMessage { if unknown_hashes.is_empty() { // none of the hashes in the global message were unknown. // we can just return the message directly. - return Ok(DiscardWaitOrReady::Ready(input)); + return Ok(DiscardWaitOrReady::Ready(input)) } let locked_global = Arc::new(Mutex::new(Some(input))); @@ -538,7 +538,7 @@ impl BlockUntilImported for BlockGlobalMessage { // Delete the inner message so it won't ever be forwarded. Future calls to // `wait_completed` on the same `inner` will ignore it. 
*self.inner.lock() = None; - return None; + return None } match Arc::try_unwrap(self.inner) { @@ -939,10 +939,10 @@ mod tests { let block_sync_requests = block_sync_requester.requests.lock(); // we request blocks targeted by the precommits that aren't imported - if block_sync_requests.contains(&(h2.hash(), *h2.number())) - && block_sync_requests.contains(&(h3.hash(), *h3.number())) + if block_sync_requests.contains(&(h2.hash(), *h2.number())) && + block_sync_requests.contains(&(h3.hash(), *h3.number())) { - return Poll::Ready(()); + return Poll::Ready(()) } // NOTE: nothing in this function is future-aware (i.e nothing gets registered to wake diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index 993f87a259d1b..fb7754fc0169a 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -105,13 +105,13 @@ where use sp_arithmetic::traits::Saturating; if current_target.number().is_zero() { - return Box::pin(async { None }); + return Box::pin(async { None }) } // Constrain to the base number, if that's the minimal // vote that can be placed. 
if *base.number() + self.0 > *best_target.number() { - return Box::pin(std::future::ready(Some((base.hash(), *base.number())))); + return Box::pin(std::future::ready(Some((base.hash(), *base.number())))) } // find the target number restricted by this rule @@ -119,7 +119,7 @@ where // our current target is already lower than this rule would restrict if target_number >= *current_target.number() { - return Box::pin(async { None }); + return Box::pin(async { None }) } let current_target = current_target.clone(); @@ -161,7 +161,7 @@ where // our current target is already lower than this rule would restrict if target_number >= *current_target.number() { - return Box::pin(async { None }); + return Box::pin(async { None }) } // find the block at the given target height @@ -192,7 +192,7 @@ where } if *target_header.number() == target_number { - return Some((target_hash, target_number)); + return Some((target_hash, target_number)) } target_hash = *target_header.parent_hash(); @@ -239,8 +239,8 @@ where .await .filter(|(_, restricted_number)| { // NOTE: we can only restrict votes within the interval [base, target) - restricted_number >= base.number() - && restricted_number < restricted_target.number() + restricted_number >= base.number() && + restricted_number < restricted_target.number() }) .and_then(|(hash, _)| backend.header(BlockId::Hash(hash)).ok()) .and_then(std::convert::identity) diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index 3be4cb60da81f..c9f762fc7d593 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -94,7 +94,7 @@ impl WarpSyncProof { .ok_or_else(|| Error::InvalidRequest("Missing start block".to_string()))?; if begin_number > blockchain.info().finalized_number { - return Err(Error::InvalidRequest("Start block is not finalized".to_string())); + return Err(Error::InvalidRequest("Start block is not finalized".to_string())) } let canon_hash = 
blockchain.hash(begin_number)?.expect( @@ -106,7 +106,7 @@ impl WarpSyncProof { if canon_hash != begin { return Err(Error::InvalidRequest( "Start block is not in the finalized chain".to_string(), - )); + )) } let mut proofs = Vec::new(); @@ -126,7 +126,7 @@ impl WarpSyncProof { // if it doesn't contain a signal for standard change then the set must have changed // through a forced changed, in which case we stop collecting proofs as the chain of // trust in authority handoffs was broken. - break; + break } let justification = blockchain @@ -148,7 +148,7 @@ impl WarpSyncProof { // room for rest of the data (the size of the `Vec` and the boolean). if proofs_encoded_len + proof_size >= MAX_WARP_SYNC_PROOF_SIZE - 50 { proof_limit_reached = true; - break; + break } proofs_encoded_len += proof_size; @@ -217,7 +217,7 @@ impl WarpSyncProof { if proof.justification.target().1 != hash { return Err(Error::InvalidProof( "Mismatch between header and justification".to_owned(), - )); + )) } if let Some(scheduled_change) = find_scheduled_change::(&proof.header) { @@ -228,7 +228,7 @@ impl WarpSyncProof { // authority set change. 
return Err(Error::InvalidProof( "Header is missing authority set change digest".to_string(), - )); + )) } } } diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index d5526706e17ed..3d585a9985134 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -119,12 +119,10 @@ impl InformantDisplay { ), ), (SyncState::Idle, _, _) => ("💤", "Idle".into(), "".into()), - (SyncState::Downloading { target }, _, _) => { - ("⚙️ ", format!("Syncing{}", speed), format!(", target=#{target}")) - }, - (SyncState::Importing { target }, _, _) => { - ("⚙️ ", format!("Preparing{}", speed), format!(", target=#{target}")) - }, + (SyncState::Downloading { target }, _, _) => + ("⚙️ ", format!("Syncing{}", speed), format!(", target=#{target}")), + (SyncState::Importing { target }, _, _) => + ("⚙️ ", format!("Preparing{}", speed), format!(", target=#{target}")), }; if self.format.enable_color { @@ -188,8 +186,8 @@ fn speed( let speed = diff .saturating_mul(10_000) .checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) - / 10.0; + .map_or(0.0, |s| s as f64) / + 10.0; format!(" {:4.1} bps", speed) } else { // If the number of blocks can't be converted to a regular integer, then we need a more @@ -213,17 +211,17 @@ impl fmt::Display for TransferRateFormat { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // Special case 0. if self.0 == 0 { - return write!(f, "0"); + return write!(f, "0") } // Under 0.1 kiB, display plain bytes. if self.0 < 100 { - return write!(f, "{} B/s", self.0); + return write!(f, "{} B/s", self.0) } // Under 1.0 MiB/sec, display the value in kiB/sec. 
if self.0 < 1024 * 1024 { - return write!(f, "{:.1}kiB/s", self.0 as f64 / 1024.0); + return write!(f, "{:.1}kiB/s", self.0 as f64 / 1024.0) } write!(f, "{:.1}MiB/s", self.0 as f64 / (1024.0 * 1024.0)) diff --git a/client/keystore/src/lib.rs b/client/keystore/src/lib.rs index 01657099e90de..cf94a16f08d86 100644 --- a/client/keystore/src/lib.rs +++ b/client/keystore/src/lib.rs @@ -63,9 +63,8 @@ impl From for TraitError { fn from(error: Error) -> Self { match error { Error::KeyNotSupported(id) => TraitError::KeyNotSupported(id), - Error::InvalidSeed | Error::InvalidPhrase | Error::PublicKeyMismatch => { - TraitError::ValidationError(error.to_string()) - }, + Error::InvalidSeed | Error::InvalidPhrase | Error::PublicKeyMismatch => + TraitError::ValidationError(error.to_string()), Error::Unavailable => TraitError::Unavailable, Error::Io(e) => TraitError::Other(e.to_string()), Error::Json(e) => TraitError::Other(e.to_string()), diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index 31ec1d3f000db..54ff6a5b164a8 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -245,9 +245,8 @@ impl SyncCryptoStore for LocalKeystore { seed: Option<&str>, ) -> std::result::Result { let pair = match seed { - Some(seed) => { - self.0.write().insert_ephemeral_from_seed_by_type::(seed, id) - }, + Some(seed) => + self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), None => self.0.write().generate_by_type::(id), } .map_err(|e| -> TraitError { e.into() })?; @@ -273,9 +272,8 @@ impl SyncCryptoStore for LocalKeystore { seed: Option<&str>, ) -> std::result::Result { let pair = match seed { - Some(seed) => { - self.0.write().insert_ephemeral_from_seed_by_type::(seed, id) - }, + Some(seed) => + self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), None => self.0.write().generate_by_type::(id), } .map_err(|e| -> TraitError { e.into() })?; @@ -301,9 +299,8 @@ impl SyncCryptoStore for LocalKeystore { seed: Option<&str>, ) -> 
std::result::Result { let pair = match seed { - Some(seed) => { - self.0.write().insert_ephemeral_from_seed_by_type::(seed, id) - }, + Some(seed) => + self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), None => self.0.write().generate_by_type::(id), } .map_err(|e| -> TraitError { e.into() })?; @@ -471,13 +468,13 @@ impl KeystoreInner { /// Get the key phrase for a given public key and key type. fn key_phrase_by_type(&self, public: &[u8], key_type: KeyTypeId) -> Result> { if let Some(phrase) = self.get_additional_pair(public, key_type) { - return Ok(Some(phrase.clone())); + return Ok(Some(phrase.clone())) } let path = if let Some(path) = self.key_file_path(public, key_type) { path } else { - return Ok(None); + return Ok(None) }; if path.exists() { @@ -498,7 +495,7 @@ impl KeystoreInner { let phrase = if let Some(p) = self.key_phrase_by_type(public.as_slice(), key_type)? { p } else { - return Ok(None); + return Ok(None) }; let pair = Pair::from_string(&phrase, self.password()).map_err(|_| Error::InvalidPhrase)?; @@ -540,7 +537,7 @@ impl KeystoreInner { match array_bytes::hex2bytes(name) { Ok(ref hex) if hex.len() > 4 => { if hex[0..4] != id.0 { - continue; + continue } let public = hex[4..].to_vec(); public_keys.push(public); diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index eb3ef66b700ec..5563b3be35e8d 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -188,13 +188,13 @@ impl Future for GossipEngine { }, Event::NotificationStreamOpened { remote, protocol, role, .. } => { if protocol != this.protocol { - continue; + continue } this.state_machine.new_peer(&mut *this.network, remote, role); }, Event::NotificationStreamClosed { remote, protocol } => { if protocol != this.protocol { - continue; + continue } this.state_machine.peer_disconnected(&mut *this.network, remote); }, @@ -223,7 +223,7 @@ impl Future for GossipEngine { // The network event stream closed. 
Do the same for [`GossipValidator`]. Poll::Ready(None) => { self.is_terminated = true; - return Poll::Ready(()); + return Poll::Ready(()) }, Poll::Pending => break, } @@ -233,7 +233,7 @@ impl Future for GossipEngine { Some(n) => n, None => { this.forwarding_state = ForwardingState::Idle; - continue; + continue }, }; @@ -251,7 +251,7 @@ impl Future for GossipEngine { Poll::Pending => { // Push back onto queue for later. to_forward.push_front((topic, notification)); - break 'outer; + break 'outer }, } } @@ -261,7 +261,7 @@ impl Future for GossipEngine { if sinks.is_empty() { this.message_sinks.remove(&topic); - continue; + continue } trace!( @@ -731,7 +731,7 @@ mod tests { } if !progress { - break; + break } } Poll::Ready(()) diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 266004763d935..001f2c6136a00 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -114,13 +114,12 @@ where for (id, ref mut peer) in peers.iter_mut() { for (message_hash, topic, message) in messages.clone() { let intent = match intent { - MessageIntent::Broadcast { .. } => { + MessageIntent::Broadcast { .. 
} => if peer.known_messages.contains(message_hash) { - continue; + continue } else { MessageIntent::Broadcast - } - }, + }, MessageIntent::PeriodicRebroadcast => { if peer.known_messages.contains(message_hash) { MessageIntent::PeriodicRebroadcast @@ -134,7 +133,7 @@ where }; if !message_allowed(id, intent, topic, message) { - continue; + continue } peer.known_messages.insert(*message_hash); @@ -356,7 +355,7 @@ impl ConsensusGossip { "Ignored already known message", ); network.report_peer(who, rep::DUPLICATE_GOSSIP); - continue; + continue } // validate the message @@ -376,7 +375,7 @@ impl ConsensusGossip { protocol = %self.protocol, "Discard message from peer", ); - continue; + continue }, }; @@ -389,7 +388,7 @@ impl ConsensusGossip { protocol = %self.protocol, "Got message from unregistered peer", ); - continue; + continue }, }; @@ -422,11 +421,11 @@ impl ConsensusGossip { if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; if !force && peer.known_messages.contains(&entry.message_hash) { - continue; + continue } if !message_allowed(who, intent, &entry.topic, &entry.message) { - continue; + continue } peer.known_messages.insert(entry.message_hash); diff --git a/client/network/bitswap/src/lib.rs b/client/network/bitswap/src/lib.rs index d92f4c52cdcb2..3b26c56e7edaf 100644 --- a/client/network/bitswap/src/lib.rs +++ b/client/network/bitswap/src/lib.rs @@ -176,13 +176,13 @@ impl BitswapRequestHandler { Some(wantlist) => wantlist, None => { debug!(target: LOG_TARGET, "Unexpected bitswap message from {}", peer); - return Err(BitswapError::InvalidWantList); + return Err(BitswapError::InvalidWantList) }, }; if wantlist.entries.len() > MAX_WANTED_BLOCKS { trace!(target: LOG_TARGET, "Ignored request: too many entries"); - return Err(BitswapError::TooManyEntries); + return Err(BitswapError::TooManyEntries) } for entry in wantlist.entries { @@ -190,16 +190,16 @@ impl BitswapRequestHandler { Ok(cid) => cid, Err(e) => { trace!(target: LOG_TARGET, "Bad 
CID {:?}: {:?}", entry.block, e); - continue; + continue }, }; - if cid.version() != cid::Version::V1 - || cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) - || cid.hash().size() != 32 + if cid.version() != cid::Version::V1 || + cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) || + cid.hash().size() != 32 { debug!(target: LOG_TARGET, "Ignoring unsupported CID {}: {}", peer, cid); - continue; + continue } let mut hash = B::Hash::default(); diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index d27e1b4e6c0f3..96c7c11ec2696 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -71,9 +71,8 @@ pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { /// Splits a Multiaddress into a Multiaddress and PeerId. pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> { let who = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => { - PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)? - }, + Some(multiaddr::Protocol::P2p(key)) => + PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?, _ => return Err(ParseErr::PeerIdMissing), }; diff --git a/client/network/common/src/service/signature.rs b/client/network/common/src/service/signature.rs index 95df1e9b73ffe..602ef3d82979a 100644 --- a/client/network/common/src/service/signature.rs +++ b/client/network/common/src/service/signature.rs @@ -47,7 +47,7 @@ impl Signature { /// Verify whether the signature was made for the given message by the entity that controls the /// given `PeerId`. 
pub fn verify(&self, message: impl AsRef<[u8]>, peer_id: &PeerId) -> bool { - *peer_id == self.public_key.to_peer_id() - && self.public_key.verify(message.as_ref(), &self.bytes) + *peer_id == self.public_key.to_peer_id() && + self.public_key.verify(message.as_ref(), &self.bytes) } } diff --git a/client/network/common/src/utils.rs b/client/network/common/src/utils.rs index dd7f59081f630..d0e61a0d0475d 100644 --- a/client/network/common/src/utils.rs +++ b/client/network/common/src/utils.rs @@ -51,7 +51,7 @@ impl LruHashSet { if self.set.len() == usize::from(self.limit) { self.set.pop_front(); // remove oldest entry } - return true; + return true } false } diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index a95f4f40aee7c..abf012b82f9db 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -147,18 +147,14 @@ where let request = schema::v1::light::Request::decode(&payload[..])?; let response = match &request.request { - Some(schema::v1::light::request::Request::RemoteCallRequest(r)) => { - self.on_remote_call_request(&peer, r)? - }, - Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => { - self.on_remote_read_request(&peer, r)? - }, - Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => { - self.on_remote_read_child_request(&peer, r)? 
- }, - None => { - return Err(HandleRequestError::BadRequest("Remote request without request data.")) - }, + Some(schema::v1::light::request::Request::RemoteCallRequest(r)) => + self.on_remote_call_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => + self.on_remote_read_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => + self.on_remote_read_child_request(&peer, r)?, + None => + return Err(HandleRequestError::BadRequest("Remote request without request data.")), }; let mut data = Vec::new(); @@ -202,7 +198,7 @@ where ) -> Result { if request.keys.is_empty() { debug!("Invalid remote read request sent by {}.", peer); - return Err(HandleRequestError::BadRequest("Remote read request without keys.")); + return Err(HandleRequestError::BadRequest("Remote read request without keys.")) } trace!( @@ -241,7 +237,7 @@ where ) -> Result { if request.keys.is_empty() { debug!("Invalid remote child read request sent by {}.", peer); - return Err(HandleRequestError::BadRequest("Remove read child request without keys.")); + return Err(HandleRequestError::BadRequest("Remove read child request without keys.")) } trace!( diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index daf527e8cf8cf..48d6127f642c3 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -331,17 +331,14 @@ impl From> for BehaviourOut { protocol, notifications_sink, } => BehaviourOut::NotificationStreamReplaced { remote, protocol, notifications_sink }, - CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => { - BehaviourOut::NotificationStreamClosed { remote, protocol } - }, - CustomMessageOutcome::NotificationsReceived { remote, messages } => { - BehaviourOut::NotificationsReceived { remote, messages } - }, + CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => + BehaviourOut::NotificationStreamClosed { remote, protocol }, + 
CustomMessageOutcome::NotificationsReceived { remote, messages } => + BehaviourOut::NotificationsReceived { remote, messages }, CustomMessageOutcome::PeerNewBest(_peer_id, _number) => BehaviourOut::None, CustomMessageOutcome::SyncConnected(peer_id) => BehaviourOut::SyncConnected(peer_id), - CustomMessageOutcome::SyncDisconnected(peer_id) => { - BehaviourOut::SyncDisconnected(peer_id) - }, + CustomMessageOutcome::SyncDisconnected(peer_id) => + BehaviourOut::SyncDisconnected(peer_id), CustomMessageOutcome::None => BehaviourOut::None, } } @@ -350,15 +347,12 @@ impl From> for BehaviourOut { impl From for BehaviourOut { fn from(event: request_responses::Event) -> Self { match event { - request_responses::Event::InboundRequest { peer, protocol, result } => { - BehaviourOut::InboundRequest { peer, protocol, result } - }, - request_responses::Event::RequestFinished { peer, protocol, duration, result } => { - BehaviourOut::RequestFinished { peer, protocol, duration, result } - }, - request_responses::Event::ReputationChanges { peer, changes } => { - BehaviourOut::ReputationChanges { peer, changes } - }, + request_responses::Event::InboundRequest { peer, protocol, result } => + BehaviourOut::InboundRequest { peer, protocol, result }, + request_responses::Event::RequestFinished { peer, protocol, duration, result } => + BehaviourOut::RequestFinished { peer, protocol, duration, result }, + request_responses::Event::ReputationChanges { peer, changes } => + BehaviourOut::ReputationChanges { peer, changes }, } } } @@ -381,18 +375,14 @@ impl From for BehaviourOut { BehaviourOut::None }, DiscoveryOut::Discovered(peer_id) => BehaviourOut::Discovered(peer_id), - DiscoveryOut::ValueFound(results, duration) => { - BehaviourOut::Dht(DhtEvent::ValueFound(results), duration) - }, - DiscoveryOut::ValueNotFound(key, duration) => { - BehaviourOut::Dht(DhtEvent::ValueNotFound(key), duration) - }, - DiscoveryOut::ValuePut(key, duration) => { - BehaviourOut::Dht(DhtEvent::ValuePut(key), 
duration) - }, - DiscoveryOut::ValuePutFailed(key, duration) => { - BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration) - }, + DiscoveryOut::ValueFound(results, duration) => + BehaviourOut::Dht(DhtEvent::ValueFound(results), duration), + DiscoveryOut::ValueNotFound(key, duration) => + BehaviourOut::Dht(DhtEvent::ValueNotFound(key), duration), + DiscoveryOut::ValuePut(key, duration) => + BehaviourOut::Dht(DhtEvent::ValuePut(key), duration), + DiscoveryOut::ValuePutFailed(key, duration) => + BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration), DiscoveryOut::RandomKademliaStarted => BehaviourOut::RandomKademliaStarted, } } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index db949bf797513..00fc78061293d 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -339,7 +339,7 @@ impl DiscoveryBehaviour { target: "sub-libp2p", "Ignoring self-reported non-global address {} from {}.", addr, peer_id ); - return; + return } if let Some(matching_protocol) = supported_protocols @@ -422,9 +422,8 @@ impl DiscoveryBehaviour { let ip = match addr.iter().next() { Some(Protocol::Ip4(ip)) => IpNetwork::from(ip), Some(Protocol::Ip6(ip)) => IpNetwork::from(ip), - Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) => { - return true - }, + Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) => + return true, _ => return false, }; ip.is_global() @@ -647,7 +646,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { ) -> Poll> { // Immediately process the content of `discovered`. if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) } // Poll the stream that fires when we need to start a random Kademlia query. 
@@ -681,7 +680,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { if actually_started { let ev = DiscoveryOut::RandomKademliaStarted; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) } } } @@ -692,18 +691,18 @@ impl NetworkBehaviour for DiscoveryBehaviour { NetworkBehaviourAction::GenerateEvent(ev) => match ev { KademliaEvent::RoutingUpdated { peer, .. } => { let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) }, KademliaEvent::UnroutablePeer { peer, .. } => { let ev = DiscoveryOut::UnroutablePeer(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) }, KademliaEvent::RoutablePeer { peer, .. } => { let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) }, - KademliaEvent::PendingRoutablePeer { .. } - | KademliaEvent::InboundRequest { .. } => { + KademliaEvent::PendingRoutablePeer { .. } | + KademliaEvent::InboundRequest { .. } => { // We are not interested in this event at the moment. }, KademliaEvent::OutboundQueryCompleted { @@ -772,7 +771,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { ) }, }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) }, KademliaEvent::OutboundQueryCompleted { result: QueryResult::PutRecord(res), @@ -780,9 +779,8 @@ impl NetworkBehaviour for DiscoveryBehaviour { .. 
} => { let ev = match res { - Ok(ok) => { - DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_default()) - }, + Ok(ok) => + DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_default()), Err(e) => { debug!( target: "sub-libp2p", @@ -795,7 +793,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { ) }, }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) }, KademliaEvent::OutboundQueryCompleted { result: QueryResult::RepublishRecord(res), @@ -817,28 +815,24 @@ impl NetworkBehaviour for DiscoveryBehaviour { warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) }, }, - NetworkBehaviourAction::Dial { opts, handler } => { - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) - }, - NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => { + NetworkBehaviourAction::Dial { opts, handler } => + return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }), + NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event, - }) - }, - NetworkBehaviourAction::ReportObservedAddr { address, score } => { + }), + NetworkBehaviourAction::ReportObservedAddr { address, score } => return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score, - }) - }, - NetworkBehaviourAction::CloseConnection { peer_id, connection } => { + }), + NetworkBehaviourAction::CloseConnection { peer_id, connection } => return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection, - }) - }, + }), } } @@ -849,13 +843,13 @@ impl NetworkBehaviour for DiscoveryBehaviour { NetworkBehaviourAction::GenerateEvent(event) => match event { MdnsEvent::Discovered(list) => { if self.num_connections >= self.discovery_only_if_under_num { - continue; + continue } self.pending_events .extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); if let 
Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) } }, MdnsEvent::Expired(_) => {}, @@ -864,18 +858,16 @@ impl NetworkBehaviour for DiscoveryBehaviour { unreachable!("mDNS never dials!"); }, NetworkBehaviourAction::NotifyHandler { event, .. } => match event {}, /* `event` is an enum with no variant */ - NetworkBehaviourAction::ReportObservedAddr { address, score } => { + NetworkBehaviourAction::ReportObservedAddr { address, score } => return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score, - }) - }, - NetworkBehaviourAction::CloseConnection { peer_id, connection } => { + }), + NetworkBehaviourAction::CloseConnection { peer_id, connection } => return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection, - }) - }, + }), } } } @@ -991,8 +983,8 @@ mod tests { match e { SwarmEvent::Behaviour(behavior) => { match behavior { - DiscoveryOut::UnroutablePeer(other) - | DiscoveryOut::Discovered(other) => { + DiscoveryOut::UnroutablePeer(other) | + DiscoveryOut::Discovered(other) => { // Call `add_self_reported_address` to simulate identify // happening. 
let addr = swarms @@ -1032,12 +1024,12 @@ mod tests { // ignore non Behaviour events _ => {}, } - continue 'polling; + continue 'polling }, _ => {}, } } - break; + break } if to_discover.iter().all(|l| l.is_empty()) { diff --git a/client/network/src/network_state.rs b/client/network/src/network_state.rs index b143af776b1d6..57073c57afa69 100644 --- a/client/network/src/network_state.rs +++ b/client/network/src/network_state.rs @@ -106,12 +106,10 @@ pub enum Endpoint { impl From for PeerEndpoint { fn from(endpoint: ConnectedPoint) -> Self { match endpoint { - ConnectedPoint::Dialer { address, role_override } => { - Self::Dialing(address, role_override.into()) - }, - ConnectedPoint::Listener { local_addr, send_back_addr } => { - Self::Listening { local_addr, send_back_addr } - }, + ConnectedPoint::Dialer { address, role_override } => + Self::Dialing(address, role_override.into()), + ConnectedPoint::Listener { local_addr, send_back_addr } => + Self::Listening { local_addr, send_back_addr }, } } } diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index b37e9ce10fac4..e04d006f50501 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -370,27 +370,24 @@ impl NetworkBehaviour for PeerInfoBehaviour { Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) => { let handler = IntoConnectionHandler::select(handler, self.identify.new_handler()); - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }); + return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) }, - Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => { + Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event: EitherOutput::First(event), - }) - }, - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => { + }), + 
Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score, - }) - }, - Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => { + }), + Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection, - }) - }, + }), } } @@ -401,7 +398,7 @@ impl NetworkBehaviour for PeerInfoBehaviour { IdentifyEvent::Received { peer_id, info, .. } => { self.handle_identify_report(&peer_id, &info); let event = PeerInfoEvent::Identified { peer_id, info }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) }, IdentifyEvent::Error { peer_id, error } => { debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error) @@ -411,27 +408,24 @@ impl NetworkBehaviour for PeerInfoBehaviour { }, Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) => { let handler = IntoConnectionHandler::select(self.ping.new_handler(), handler); - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }); + return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) }, - Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => { + Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event: EitherOutput::Second(event), - }) - }, - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => { + }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score, - }) - }, - Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => { + }), + Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, 
connection }) => return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection, - }) - }, + }), } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 9bcb2eaf961b9..8c1dd39b49be3 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -288,8 +288,8 @@ where out_peers: network_config.default_peers_set.out_peers, bootnodes, reserved_nodes: default_sets_reserved.clone(), - reserved_only: network_config.default_peers_set.non_reserved_mode - == NonReservedPeerMode::Deny, + reserved_only: network_config.default_peers_set.non_reserved_mode == + NonReservedPeerMode::Deny, }); for set_cfg in &network_config.extra_sets { @@ -336,8 +336,8 @@ where }; let cache_capacity = NonZeroUsize::new( - (network_config.default_peers_set.in_peers as usize - + network_config.default_peers_set.out_peers as usize) + (network_config.default_peers_set.in_peers as usize + + network_config.default_peers_set.out_peers as usize) .max(1), ) .expect("cache capacity is not zero"); @@ -356,8 +356,8 @@ where default_peers_set_no_slot_connected_peers: HashSet::new(), default_peers_set_num_full: network_config.default_peers_set_num_full as usize, default_peers_set_num_light: { - let total = network_config.default_peers_set.out_peers - + network_config.default_peers_set.in_peers; + let total = network_config.default_peers_set.out_peers + + network_config.default_peers_set.in_peers; total.saturating_sub(network_config.default_peers_set_num_full) as usize }, peerset_handle: peerset_handle.clone(), @@ -521,7 +521,7 @@ where if self.peers.contains_key(&who) { error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", who); debug_assert!(false); - return Err(()); + return Err(()) } if status.genesis_hash != self.genesis_hash { @@ -544,7 +544,7 @@ where ); } - return Err(()); + return Err(()) } if self.roles.is_light() { @@ -553,7 +553,7 @@ where debug!(target: "sync", "Peer {} is unable to serve light 
requests", who); self.peerset_handle.report_peer(who, rep::BAD_ROLE); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()); + return Err(()) } // we don't interested in peers that are far behind us @@ -566,31 +566,31 @@ where debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who); self.peerset_handle.report_peer(who, rep::PEER_BEHIND_US_LIGHT); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()); + return Err(()) } } let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&who); let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 }; - if status.roles.is_full() - && self.chain_sync.num_peers() - >= self.default_peers_set_num_full - + self.default_peers_set_no_slot_connected_peers.len() - + this_peer_reserved_slot + if status.roles.is_full() && + self.chain_sync.num_peers() >= + self.default_peers_set_num_full + + self.default_peers_set_no_slot_connected_peers.len() + + this_peer_reserved_slot { debug!(target: "sync", "Too many full nodes, rejecting {}", who); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()); + return Err(()) } - if status.roles.is_light() - && (self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light + if status.roles.is_light() && + (self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light { // Make sure that not all slots are occupied by light clients. 
debug!(target: "sync", "Too many light nodes, rejecting {}", who); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()); + return Err(()) } let peer = Peer { @@ -610,7 +610,7 @@ where Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); - return Err(()); + return Err(()) }, } } else { @@ -642,17 +642,17 @@ where Ok(Some(header)) => header, Ok(None) => { warn!("Trying to announce unknown block: {}", hash); - return; + return }, Err(e) => { warn!("Error reading block header {}: {}", hash, e); - return; + return }, }; // don't announce genesis block since it will be ignored if header.number().is_zero() { - return; + return } let is_best = self.chain.info().best_hash == hash; @@ -699,7 +699,7 @@ where None => { log::error!(target: "sync", "Received block announce from disconnected peer {}", who); debug_assert!(false); - return; + return }, }; @@ -738,9 +738,9 @@ where // AND // 2) parent block is already imported and not pruned. 
if is_best { - return CustomMessageOutcome::PeerNewBest(who, *announce.header.number()); + return CustomMessageOutcome::PeerNewBest(who, *announce.header.number()) } else { - return CustomMessageOutcome::None; + return CustomMessageOutcome::None } }, PollBlockAnnounceValidation::ImportHeader { announce, is_best, who } => { @@ -760,7 +760,7 @@ where } self.report_peer(who, rep::BAD_BLOCK_ANNOUNCEMENT); - return CustomMessageOutcome::None; + return CustomMessageOutcome::None }, }; @@ -1098,7 +1098,7 @@ where params: &mut impl PollParameters, ) -> Poll> { if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } // Advance the state of `ChainSync` @@ -1131,28 +1131,24 @@ where } if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } let event = match self.behaviour.poll(cx, params) { Poll::Pending => return Poll::Pending, Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) => ev, - Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) => { - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) - }, - Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => { + Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) => + return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }), + Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event, - }) - }, - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => { - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) - }, - Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => { - return 
Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) - }, + }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), + Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => + return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }), }; let outcome = match event { @@ -1254,9 +1250,9 @@ where } } }, - NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => { - if set_id == HARDCODED_PEERSETS_SYNC - || self.bad_handshake_substreams.contains(&(peer_id, set_id)) + NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => + if set_id == HARDCODED_PEERSETS_SYNC || + self.bad_handshake_substreams.contains(&(peer_id, set_id)) { CustomMessageOutcome::None } else { @@ -1265,8 +1261,7 @@ where protocol: self.notification_protocols[usize::from(set_id)].clone(), notifications_sink, } - } - }, + }, NotificationsOut::CustomProtocolClosed { peer_id, set_id } => { // Set number 0 is hardcoded the default set of peers we sync from. 
if set_id == HARDCODED_PEERSETS_SYNC { @@ -1318,9 +1313,8 @@ where ); CustomMessageOutcome::None }, - _ if self.bad_handshake_substreams.contains(&(peer_id, set_id)) => { - CustomMessageOutcome::None - }, + _ if self.bad_handshake_substreams.contains(&(peer_id, set_id)) => + CustomMessageOutcome::None, _ => { let protocol_name = self.notification_protocols[usize::from(set_id)].clone(); CustomMessageOutcome::NotificationsReceived { @@ -1332,11 +1326,11 @@ where }; if !matches!(outcome, CustomMessageOutcome::::None) { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)) } if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } // This block can only be reached if an event was pulled from the behaviour and that diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 5df92d010a36d..ef652387d2c7d 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -166,13 +166,12 @@ pub mod generic { let compact = CompactStatus::decode(value)?; let chain_status = match >::decode(value) { Ok(v) => v, - Err(e) => { + Err(e) => if compact.version <= LAST_CHAIN_STATUS_VERSION { - return Err(e); + return Err(e) } else { Vec::new() - } - }, + }, }; let CompactStatus { diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index 9e21d15b82bca..04f6fe445ac63 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -427,7 +427,7 @@ impl Notifications { let mut entry = if let Entry::Occupied(entry) = self.peers.entry((*peer_id, set_id)) { entry } else { - return; + return }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { @@ 
-512,7 +512,7 @@ impl Notifications { target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming for incoming peer" ); - return; + return }; inc.alive = false; @@ -577,7 +577,7 @@ impl Notifications { "Tried to sent notification to {:?} without an open channel.", target, ); - return; + return }, Some(sink) => sink, }; @@ -621,7 +621,7 @@ impl Notifications { handler, }); entry.insert(PeerState::Requested); - return; + return }, }; @@ -824,7 +824,7 @@ impl Notifications { Entry::Vacant(entry) => { trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.", entry.key().0, set_id); - return; + return }, }; @@ -930,7 +930,7 @@ impl Notifications { self.incoming.remove(pos) } else { error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); - return; + return }; if !incoming.alive { @@ -945,14 +945,14 @@ impl Notifications { self.peerset.dropped(incoming.set_id, incoming.peer_id, DropReason::Unknown); }, } - return; + return } let state = match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) { Some(s) => s, None => { debug_assert!(false); - return; + return }, }; @@ -999,20 +999,20 @@ impl Notifications { self.incoming.remove(pos) } else { error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index); - return; + return }; if !incoming.alive { trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Obsolete incoming, \ ignoring", index, incoming.peer_id, incoming.set_id); - return; + return } let state = match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) { Some(s) => s, None => { debug_assert!(false); - return; + return }, }; @@ -1107,10 +1107,10 @@ impl NetworkBehaviour for Notifications { // In all other states, add this new connection to the list of closed inactive // connections. - PeerState::Incoming { connections, .. } - | PeerState::Disabled { connections, .. } - | PeerState::DisabledPendingEnable { connections, .. } - | PeerState::Enabled { connections, .. 
} => { + PeerState::Incoming { connections, .. } | + PeerState::Disabled { connections, .. } | + PeerState::DisabledPendingEnable { connections, .. } | + PeerState::Enabled { connections, .. } => { trace!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}, {:?}, {:?}): Secondary connection. Leaving closed.", peer_id, set_id, endpoint, *conn); @@ -1134,7 +1134,7 @@ impl NetworkBehaviour for Notifications { } else { error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler"); debug_assert!(false); - return; + return }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { @@ -1367,9 +1367,9 @@ impl NetworkBehaviour for Notifications { } }, - PeerState::Requested - | PeerState::PendingRequest { .. } - | PeerState::Backoff { .. } => { + PeerState::Requested | + PeerState::PendingRequest { .. } | + PeerState::Backoff { .. } => { // This is a serious bug either in this state machine or in libp2p. error!(target: "sub-libp2p", "`inject_connection_closed` called for unknown peer {}", @@ -1416,9 +1416,7 @@ impl NetworkBehaviour for Notifications { let ban_duration = match st { PeerState::PendingRequest { timer_deadline, .. } if timer_deadline > now => - { - cmp::max(timer_deadline - now, Duration::from_secs(5)) - }, + cmp::max(timer_deadline - now, Duration::from_secs(5)), _ => Duration::from_secs(5), }; @@ -1442,10 +1440,10 @@ impl NetworkBehaviour for Notifications { // We can still get dial failures even if we are already connected to the // peer, as an extra diagnostic for an earlier attempt. - st @ PeerState::Disabled { .. } - | st @ PeerState::Enabled { .. } - | st @ PeerState::DisabledPendingEnable { .. } - | st @ PeerState::Incoming { .. } => { + st @ PeerState::Disabled { .. } | + st @ PeerState::Enabled { .. } | + st @ PeerState::DisabledPendingEnable { .. } | + st @ PeerState::Incoming { .. 
} => { *entry.into_mut() = st; }, @@ -1476,7 +1474,7 @@ impl NetworkBehaviour for Notifications { "OpenDesiredByRemote: State mismatch in the custom protos handler" ); debug_assert!(false); - return; + return }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { @@ -1538,8 +1536,8 @@ impl NetworkBehaviour for Notifications { // more to do. debug_assert!(matches!( connec_state, - ConnectionState::OpenDesiredByRemote - | ConnectionState::Closing | ConnectionState::Opening + ConnectionState::OpenDesiredByRemote | + ConnectionState::Closing | ConnectionState::Opening )); } } else { @@ -1658,7 +1656,7 @@ impl NetworkBehaviour for Notifications { } else { error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler"); debug_assert!(false); - return; + return }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { @@ -1677,12 +1675,12 @@ impl NetworkBehaviour for Notifications { error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler"); debug_assert!(false); - return; + return }; if matches!(connections[pos].1, ConnectionState::Closing) { *entry.into_mut() = PeerState::Enabled { connections }; - return; + return } debug_assert!(matches!(connections[pos].1, ConnectionState::Open(_))); @@ -1734,8 +1732,8 @@ impl NetworkBehaviour for Notifications { // All connections in `Disabled` and `DisabledPendingEnable` have been sent a // `Close` message already, and as such ignore any `CloseDesired` message. - state @ PeerState::Disabled { .. } - | state @ PeerState::DisabledPendingEnable { .. } => { + state @ PeerState::Disabled { .. } | + state @ PeerState::DisabledPendingEnable { .. } => { *entry.into_mut() = state; }, state => { @@ -1755,10 +1753,10 @@ impl NetworkBehaviour for Notifications { match self.peers.get_mut(&(source, set_id)) { // Move the connection from `Closing` to `Closed`. - Some(PeerState::Incoming { connections, .. }) - | Some(PeerState::DisabledPendingEnable { connections, .. 
}) - | Some(PeerState::Disabled { connections, .. }) - | Some(PeerState::Enabled { connections, .. }) => { + Some(PeerState::Incoming { connections, .. }) | + Some(PeerState::DisabledPendingEnable { connections, .. }) | + Some(PeerState::Disabled { connections, .. }) | + Some(PeerState::Enabled { connections, .. }) => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| { *c == connection && matches!(s, ConnectionState::Closing) }) { @@ -1827,9 +1825,9 @@ impl NetworkBehaviour for Notifications { } }, - Some(PeerState::Incoming { connections, .. }) - | Some(PeerState::DisabledPendingEnable { connections, .. }) - | Some(PeerState::Disabled { connections, .. }) => { + Some(PeerState::Incoming { connections, .. }) | + Some(PeerState::DisabledPendingEnable { connections, .. }) | + Some(PeerState::Disabled { connections, .. }) => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| { *c == connection && matches!(s, ConnectionState::OpeningThenClosing) }) { @@ -1861,7 +1859,7 @@ impl NetworkBehaviour for Notifications { } else { error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); debug_assert!(false); - return; + return }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { @@ -1901,17 +1899,17 @@ impl NetworkBehaviour for Notifications { *entry.into_mut() = PeerState::Enabled { connections }; } }, - mut state @ PeerState::Incoming { .. } - | mut state @ PeerState::DisabledPendingEnable { .. } - | mut state @ PeerState::Disabled { .. } => { + mut state @ PeerState::Incoming { .. } | + mut state @ PeerState::DisabledPendingEnable { .. } | + mut state @ PeerState::Disabled { .. } => { match &mut state { - PeerState::Incoming { connections, .. } - | PeerState::Disabled { connections, .. } - | PeerState::DisabledPendingEnable { connections, .. } => { + PeerState::Incoming { connections, .. } | + PeerState::Disabled { connections, .. } | + PeerState::DisabledPendingEnable { connections, .. 
} => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| { - *c == connection - && matches!(s, ConnectionState::OpeningThenClosing) + *c == connection && + matches!(s, ConnectionState::OpeningThenClosing) }) { *connec_state = ConnectionState::Closing; } else { @@ -1977,7 +1975,7 @@ impl NetworkBehaviour for Notifications { _params: &mut impl PollParameters, ) -> Poll> { if let Some(event) = self.events.pop_front() { - return Poll::Ready(event); + return Poll::Ready(event) } // Poll for instructions from the peerset. @@ -1998,7 +1996,7 @@ impl NetworkBehaviour for Notifications { }, Poll::Ready(None) => { error!(target: "sub-libp2p", "Peerset receiver stream has returned None"); - break; + break }, Poll::Pending => break, } @@ -2066,7 +2064,7 @@ impl NetworkBehaviour for Notifications { } if let Some(event) = self.events.pop_front() { - return Poll::Ready(event); + return Poll::Ready(event) } Poll::Pending diff --git a/client/network/src/protocol/notifications/handler.rs b/client/network/src/protocol/notifications/handler.rs index 4ae13a259be71..ea09cb76edce1 100644 --- a/client/network/src/protocol/notifications/handler.rs +++ b/client/network/src/protocol/notifications/handler.rs @@ -521,13 +521,13 @@ impl ConnectionHandler for NotifsHandler { // in mind that it is invalid for the remote to open multiple such // substreams, and therefore sending a "RST" is the most correct thing // to do. - return; + return }, - State::Opening { ref mut in_substream, .. } - | State::Open { ref mut in_substream, .. } => { + State::Opening { ref mut in_substream, .. } | + State::Open { ref mut in_substream, .. } => { if in_substream.is_some() { // Same remark as above. 
- return; + return } // Create `handshake_message` on a separate line to be sure that the @@ -545,8 +545,8 @@ impl ConnectionHandler for NotifsHandler { protocol_index: Self::OutboundOpenInfo, ) { match self.protocols[protocol_index].state { - State::Closed { ref mut pending_opening } - | State::OpenDesiredByRemote { ref mut pending_opening, .. } => { + State::Closed { ref mut pending_opening } | + State::OpenDesiredByRemote { ref mut pending_opening, .. } => { debug_assert!(*pending_opening); *pending_opening = false; }, @@ -682,8 +682,8 @@ impl ConnectionHandler for NotifsHandler { _: ConnectionHandlerUpgrErr, ) { match self.protocols[num].state { - State::Closed { ref mut pending_opening } - | State::OpenDesiredByRemote { ref mut pending_opening, .. } => { + State::Closed { ref mut pending_opening } | + State::OpenDesiredByRemote { ref mut pending_opening, .. } => { debug_assert!(*pending_opening); *pending_opening = false; }, @@ -704,7 +704,7 @@ impl ConnectionHandler for NotifsHandler { fn connection_keep_alive(&self) -> KeepAlive { // `Yes` if any protocol has some activity. if self.protocols.iter().any(|p| !matches!(p.state, State::Closed { .. })) { - return KeepAlive::Yes; + return KeepAlive::Yes } // A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote @@ -724,7 +724,7 @@ impl ConnectionHandler for NotifsHandler { >, > { if let Some(ev) = self.events_queue.pop_front() { - return Poll::Ready(ev); + return Poll::Ready(ev) } // For each open substream, try send messages from `notifications_sink_rx` to the @@ -739,11 +739,10 @@ impl ConnectionHandler for NotifsHandler { // available in `notifications_sink_rx`. This avoids waking up the task when // a substream is ready to send if there isn't actually something to send. 
match Pin::new(&mut *notifications_sink_rx).as_mut().poll_peek(cx) { - Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => { + Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => return Poll::Ready(ConnectionHandlerEvent::Close( NotifsHandlerError::SyncNotificationsClogged, - )) - }, + )), Poll::Ready(Some(&NotificationsSinkMessage::Notification { .. })) => {}, Poll::Ready(None) | Poll::Pending => break, } @@ -757,15 +756,14 @@ impl ConnectionHandler for NotifsHandler { // Now that the substream is ready for a message, grab what to send. let message = match notifications_sink_rx.poll_next_unpin(cx) { - Poll::Ready(Some(NotificationsSinkMessage::Notification { message })) => { - message - }, - Poll::Ready(Some(NotificationsSinkMessage::ForceClose)) - | Poll::Ready(None) - | Poll::Pending => { + Poll::Ready(Some(NotificationsSinkMessage::Notification { message })) => + message, + Poll::Ready(Some(NotificationsSinkMessage::ForceClose)) | + Poll::Ready(None) | + Poll::Pending => { // Should never be reached, as per `poll_peek` above. debug_assert!(false); - break; + break }, }; @@ -790,15 +788,15 @@ impl ConnectionHandler for NotifsHandler { Poll::Ready(Err(_)) => { *out_substream = None; let event = NotifsHandlerOut::CloseDesired { protocol_index }; - return Poll::Ready(ConnectionHandlerEvent::Custom(event)); + return Poll::Ready(ConnectionHandlerEvent::Custom(event)) }, }; }, - State::Closed { .. } - | State::Opening { .. } - | State::Open { out_substream: None, .. } - | State::OpenDesiredByRemote { .. } => {}, + State::Closed { .. } | + State::Opening { .. } | + State::Open { out_substream: None, .. } | + State::OpenDesiredByRemote { .. } => {}, } } @@ -807,22 +805,21 @@ impl ConnectionHandler for NotifsHandler { // Inbound substreams being closed is always tolerated, except for the // `OpenDesiredByRemote` state which might need to be switched back to `Closed`. match &mut self.protocols[protocol_index].state { - State::Closed { .. 
} - | State::Open { in_substream: None, .. } - | State::Opening { in_substream: None } => {}, + State::Closed { .. } | + State::Open { in_substream: None, .. } | + State::Opening { in_substream: None } => {}, - State::Open { in_substream: in_substream @ Some(_), .. } => { + State::Open { in_substream: in_substream @ Some(_), .. } => match Stream::poll_next(Pin::new(in_substream.as_mut().unwrap()), cx) { Poll::Pending => {}, Poll::Ready(Some(Ok(message))) => { let event = NotifsHandlerOut::Notification { protocol_index, message }; - return Poll::Ready(ConnectionHandlerEvent::Custom(event)); + return Poll::Ready(ConnectionHandlerEvent::Custom(event)) }, Poll::Ready(None) | Poll::Ready(Some(Err(_))) => *in_substream = None, - } - }, + }, - State::OpenDesiredByRemote { in_substream, pending_opening } => { + State::OpenDesiredByRemote { in_substream, pending_opening } => match NotificationsInSubstream::poll_process(Pin::new(in_substream), cx) { Poll::Pending => {}, Poll::Ready(Ok(void)) => match void {}, @@ -831,12 +828,11 @@ impl ConnectionHandler for NotifsHandler { State::Closed { pending_opening: *pending_opening }; return Poll::Ready(ConnectionHandlerEvent::Custom( NotifsHandlerOut::CloseDesired { protocol_index }, - )); + )) }, - } - }, + }, - State::Opening { in_substream: in_substream @ Some(_), .. } => { + State::Opening { in_substream: in_substream @ Some(_), .. 
} => match NotificationsInSubstream::poll_process( Pin::new(in_substream.as_mut().unwrap()), cx, @@ -844,8 +840,7 @@ impl ConnectionHandler for NotifsHandler { Poll::Pending => {}, Poll::Ready(Ok(void)) => match void {}, Poll::Ready(Err(_)) => *in_substream = None, - } - }, + }, } } diff --git a/client/network/src/protocol/notifications/tests.rs b/client/network/src/protocol/notifications/tests.rs index c80b2571f8fd8..fa79366d20283 100644 --- a/client/network/src/protocol/notifications/tests.rs +++ b/client/network/src/protocol/notifications/tests.rs @@ -285,9 +285,9 @@ fn reconnect_after_disconnect() { NotificationsOut::CustomProtocolClosed { .. }, )) => match service1_state { ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain - | ServiceState::NotConnected - | ServiceState::Disconnected => panic!(), + ServiceState::ConnectedAgain | + ServiceState::NotConnected | + ServiceState::Disconnected => panic!(), }, future::Either::Right(SwarmEvent::Behaviour( NotificationsOut::CustomProtocolOpen { .. }, @@ -308,17 +308,17 @@ fn reconnect_after_disconnect() { NotificationsOut::CustomProtocolClosed { .. }, )) => match service2_state { ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain - | ServiceState::NotConnected - | ServiceState::Disconnected => panic!(), + ServiceState::ConnectedAgain | + ServiceState::NotConnected | + ServiceState::Disconnected => panic!(), }, _ => {}, } - if service1_state == ServiceState::ConnectedAgain - && service2_state == ServiceState::ConnectedAgain + if service1_state == ServiceState::ConnectedAgain && + service2_state == ServiceState::ConnectedAgain { - break; + break } } @@ -340,8 +340,8 @@ fn reconnect_after_disconnect() { }; match event { - SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. }) - | SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. 
}) => panic!(), + SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. }) | + SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. }) => panic!(), _ => {}, } } diff --git a/client/network/src/protocol/notifications/upgrade/notifications.rs b/client/network/src/protocol/notifications/upgrade/notifications.rs index 950ddfd8fff50..56cfefd75d53d 100644 --- a/client/network/src/protocol/notifications/upgrade/notifications.rs +++ b/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -149,7 +149,7 @@ where return Err(NotificationsHandshakeError::TooLarge { requested: handshake_len, max: MAX_HANDSHAKE_SIZE, - }); + }) } let mut handshake = vec![0u8; handshake_len]; @@ -197,7 +197,7 @@ where pub fn send_handshake(&mut self, message: impl Into>) { if !matches!(self.handshake, NotificationsInSubstreamHandshake::NotSent) { error!(target: "sub-libp2p", "Tried to send handshake twice"); - return; + return } self.handshake = NotificationsInSubstreamHandshake::PendingSend(message.into()); @@ -224,28 +224,27 @@ where }, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); - return Poll::Pending; + return Poll::Pending }, } }, NotificationsInSubstreamHandshake::Flush => { match Sink::poll_flush(this.socket.as_mut(), cx)? 
{ - Poll::Ready(()) => { - *this.handshake = NotificationsInSubstreamHandshake::Sent - }, + Poll::Ready(()) => + *this.handshake = NotificationsInSubstreamHandshake::Sent, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::Flush; - return Poll::Pending; + return Poll::Pending }, } }, - st @ NotificationsInSubstreamHandshake::NotSent - | st @ NotificationsInSubstreamHandshake::Sent - | st @ NotificationsInSubstreamHandshake::ClosingInResponseToRemote - | st @ NotificationsInSubstreamHandshake::BothSidesClosed => { + st @ NotificationsInSubstreamHandshake::NotSent | + st @ NotificationsInSubstreamHandshake::Sent | + st @ NotificationsInSubstreamHandshake::ClosingInResponseToRemote | + st @ NotificationsInSubstreamHandshake::BothSidesClosed => { *this.handshake = st; - return Poll::Pending; + return Poll::Pending }, } } @@ -266,7 +265,7 @@ where match mem::replace(this.handshake, NotificationsInSubstreamHandshake::Sent) { NotificationsInSubstreamHandshake::NotSent => { *this.handshake = NotificationsInSubstreamHandshake::NotSent; - return Poll::Pending; + return Poll::Pending }, NotificationsInSubstreamHandshake::PendingSend(msg) => { match Sink::poll_ready(this.socket.as_mut(), cx) { @@ -279,51 +278,47 @@ where }, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); - return Poll::Pending; + return Poll::Pending }, } }, NotificationsInSubstreamHandshake::Flush => { match Sink::poll_flush(this.socket.as_mut(), cx)? 
{ - Poll::Ready(()) => { - *this.handshake = NotificationsInSubstreamHandshake::Sent - }, + Poll::Ready(()) => + *this.handshake = NotificationsInSubstreamHandshake::Sent, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::Flush; - return Poll::Pending; + return Poll::Pending }, } }, NotificationsInSubstreamHandshake::Sent => { match Stream::poll_next(this.socket.as_mut(), cx) { - Poll::Ready(None) => { + Poll::Ready(None) => *this.handshake = - NotificationsInSubstreamHandshake::ClosingInResponseToRemote - }, + NotificationsInSubstreamHandshake::ClosingInResponseToRemote, Poll::Ready(Some(msg)) => { *this.handshake = NotificationsInSubstreamHandshake::Sent; - return Poll::Ready(Some(msg)); + return Poll::Ready(Some(msg)) }, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::Sent; - return Poll::Pending; + return Poll::Pending }, } }, - NotificationsInSubstreamHandshake::ClosingInResponseToRemote => { + NotificationsInSubstreamHandshake::ClosingInResponseToRemote => match Sink::poll_close(this.socket.as_mut(), cx)? 
{ - Poll::Ready(()) => { - *this.handshake = NotificationsInSubstreamHandshake::BothSidesClosed - }, + Poll::Ready(()) => + *this.handshake = NotificationsInSubstreamHandshake::BothSidesClosed, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::ClosingInResponseToRemote; - return Poll::Pending; + return Poll::Pending }, - } - }, + }, NotificationsInSubstreamHandshake::BothSidesClosed => return Poll::Ready(None), } @@ -378,7 +373,7 @@ where return Err(NotificationsHandshakeError::TooLarge { requested: handshake_len, max: MAX_HANDSHAKE_SIZE, - }); + }) } let mut handshake = vec![0u8; handshake_len]; diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index 52737bf121283..d49cbd8051341 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -212,9 +212,7 @@ impl RequestResponsesBehaviour { match protocols.entry(protocol.name) { Entry::Vacant(e) => e.insert((rq_rp, protocol.inbound_queue)), - Entry::Occupied(e) => { - return Err(RegisterError::DuplicateProtocol(e.key().clone())) - }, + Entry::Occupied(e) => return Err(RegisterError::DuplicateProtocol(e.key().clone())), }; } @@ -368,7 +366,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { (p_name, event): ::OutEvent, ) { if let Some((proto, _)) = self.protocols.get_mut(&*p_name) { - return proto.inject_event(peer_id, connection, event); + return proto.inject_event(peer_id, connection, event) } log::warn!(target: "sub-libp2p", @@ -463,7 +461,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { resp_builder, get_peer_reputation, }); - return Poll::Pending; + return Poll::Pending }, Poll::Ready(reputation) => { // Once we get the reputation we can continue processing the request. 
@@ -479,7 +477,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { peer, reputation, ); - continue 'poll_all; + continue 'poll_all } let (tx, rx) = oneshot::channel(); @@ -518,7 +516,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // This `continue` makes sure that `pending_responses` gets polled // after we have added the new element. - continue 'poll_all; + continue 'poll_all }, } } @@ -559,7 +557,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { if !reputation_changes.is_empty() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent( Event::ReputationChanges { peer, changes: reputation_changes }, - )); + )) } } @@ -580,27 +578,24 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } let protocol = protocol.to_string(); let handler = self.new_handler_with_replacement(protocol, handler); - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }); + return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) }, - NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => { + NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event: ((*protocol).to_string(), event), - }) - }, - NetworkBehaviourAction::ReportObservedAddr { address, score } => { + }), + NetworkBehaviourAction::ReportObservedAddr { address, score } => return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score, - }) - }, - NetworkBehaviourAction::CloseConnection { peer_id, connection } => { + }), + NetworkBehaviourAction::CloseConnection { peer_id, connection } => return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection, - }) - }, + }), }; match ev { @@ -631,7 +626,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // This `continue` makes sure that `message_request` gets polled // after we have added the new element. 
- continue 'poll_all; + continue 'poll_all }, // Received a response from a remote to one of our requests. @@ -657,7 +652,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { request_id, ); debug_assert!(false); - continue; + continue }, }; @@ -668,7 +663,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { result: delivered, }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) }, // One of our requests has failed. @@ -700,7 +695,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { request_id, ); debug_assert!(false); - continue; + continue }, }; @@ -711,7 +706,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { result: Err(RequestFailure::Network(error)), }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) }, // An inbound request failed, either while reading the request or due to @@ -727,7 +722,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { protocol: protocol.clone(), result: Err(ResponseFailure::Network(error)), }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) }, // A response to an inbound request has been sent. @@ -756,13 +751,13 @@ impl NetworkBehaviour for RequestResponsesBehaviour { result: Ok(arrival_time), }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) }, }; } } - break Poll::Pending; + break Poll::Pending } } } @@ -814,7 +809,7 @@ impl RequestResponseCodec for GenericCodec { return Err(io::Error::new( io::ErrorKind::InvalidInput, format!("Request size exceeds limit: {} > {}", length, self.max_request_size), - )); + )) } // Read the payload. 
@@ -841,9 +836,7 @@ impl RequestResponseCodec for GenericCodec { Ok(l) => l, Err(unsigned_varint::io::ReadError::Io(err)) if matches!(err.kind(), io::ErrorKind::UnexpectedEof) => - { - return Ok(Err(())) - }, + return Ok(Err(())), Err(err) => return Err(io::Error::new(io::ErrorKind::InvalidInput, err)), }; @@ -851,7 +844,7 @@ impl RequestResponseCodec for GenericCodec { return Err(io::Error::new( io::ErrorKind::InvalidInput, format!("Response size exceeds limit: {} > {}", length, self.max_response_size), - )); + )) } // Read the payload. @@ -1064,7 +1057,7 @@ mod tests { }, SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => { result.unwrap(); - break; + break }, _ => {}, } @@ -1133,7 +1126,7 @@ mod tests { match swarm.select_next_some().await { SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { assert!(result.is_ok()); - break; + break }, _ => {}, } @@ -1167,7 +1160,7 @@ mod tests { }, SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => { assert!(result.is_err()); - break; + break }, _ => {}, } @@ -1336,7 +1329,7 @@ mod tests { num_responses += 1; result.unwrap(); if num_responses == 2 { - break; + break } }, _ => {}, diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 5c64f094bd46f..7d756ed2d1e88 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -366,9 +366,8 @@ where match result { Ok(b) => b, - Err(crate::request_responses::RegisterError::DuplicateProtocol(proto)) => { - return Err(Error::DuplicateRequestResponseProtocol { protocol: proto }) - }, + Err(crate::request_responses::RegisterError::DuplicateProtocol(proto)) => + return Err(Error::DuplicateRequestResponseProtocol { protocol: proto }), } }; @@ -590,7 +589,7 @@ where } else { error!(target: "sub-libp2p", "Found state inconsistency between custom protocol \ and debug information about {:?}", peer_id); - return None; + return None }; Some(( @@ -884,7 +883,7 @@ where fn add_reserved_peer(&self, peer: 
MultiaddrWithPeerId) -> Result<(), String> { // Make sure the local peer ID is never added to the PSM. if peer.peer_id == self.local_peer_id { - return Err("Local peer ID cannot be added as a reserved peer.".to_string()); + return Err("Local peer ID cannot be added as a reserved peer.".to_string()) } let _ = self @@ -910,7 +909,7 @@ where for (peer_id, addr) in peers_addrs.into_iter() { // Make sure the local peer ID is never added to the PSM. if peer_id == self.local_peer_id { - return Err("Local peer ID cannot be added as a reserved peer.".to_string()); + return Err("Local peer ID cannot be added as a reserved peer.".to_string()) } peers.insert(peer_id); @@ -939,7 +938,7 @@ where for (peer_id, addr) in peers.into_iter() { // Make sure the local peer ID is never added to the PSM. if peer_id == self.local_peer_id { - return Err("Local peer ID cannot be added as a reserved peer.".to_string()); + return Err("Local peer ID cannot be added as a reserved peer.".to_string()) } if !addr.is_empty() { @@ -973,7 +972,7 @@ where for (peer_id, addr) in peers.into_iter() { // Make sure the local peer ID is never added to the PSM. if peer_id == self.local_peer_id { - return Err("Local peer ID cannot be added as a reserved peer.".to_string()); + return Err("Local peer ID cannot be added as a reserved peer.".to_string()) } if !addr.is_empty() { @@ -1033,7 +1032,7 @@ where "Attempted to send notification on missing or closed substream: {}, {:?}", target, protocol, ); - return; + return } }; @@ -1065,7 +1064,7 @@ where if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { sink.clone() } else { - return Err(NotificationSenderError::Closed); + return Err(NotificationSenderError::Closed) } }; @@ -1308,7 +1307,7 @@ where num_iterations += 1; if num_iterations >= 100 { cx.waker().wake_by_ref(); - break; + break } // Process the next message coming from the `NetworkService`. 
@@ -1333,12 +1332,10 @@ where .behaviour_mut() .user_protocol_mut() .clear_justification_requests(), - ServiceToWorkerMsg::GetValue(key) => { - this.network_service.behaviour_mut().get_value(key) - }, - ServiceToWorkerMsg::PutValue(key, value) => { - this.network_service.behaviour_mut().put_value(key, value) - }, + ServiceToWorkerMsg::GetValue(key) => + this.network_service.behaviour_mut().get_value(key), + ServiceToWorkerMsg::PutValue(key, value) => + this.network_service.behaviour_mut().put_value(key, value), ServiceToWorkerMsg::SetReservedOnly(reserved_only) => this .network_service .behaviour_mut() @@ -1374,9 +1371,8 @@ where .behaviour_mut() .user_protocol_mut() .remove_set_reserved_peer(protocol, peer_id), - ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => { - this.network_service.behaviour_mut().add_known_address(peer_id, addr) - }, + ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => + this.network_service.behaviour_mut().add_known_address(peer_id, addr), ServiceToWorkerMsg::AddToPeersSet(protocol, peer_id) => this .network_service .behaviour_mut() @@ -1429,7 +1425,7 @@ where num_iterations += 1; if num_iterations >= 1000 { cx.waker().wake_by_ref(); - break; + break } // Process the next action coming from the network. @@ -1479,15 +1475,11 @@ where // inbound request whenever a request with an unsupported // protocol is received. This is not reported in order to // avoid confusions. - { - continue - }, - ResponseFailure::Network(InboundFailure::ResponseOmission) => { - "busy-omitted" - }, - ResponseFailure::Network(InboundFailure::ConnectionClosed) => { - "connection-closed" - }, + continue, + ResponseFailure::Network(InboundFailure::ResponseOmission) => + "busy-omitted", + ResponseFailure::Network(InboundFailure::ConnectionClosed) => + "connection-closed", }; metrics @@ -1503,7 +1495,7 @@ where duration, result, .. 
- })) => { + })) => if let Some(metrics) = this.metrics.as_ref() { match result { Ok(_) => { @@ -1518,13 +1510,11 @@ where RequestFailure::UnknownProtocol => "unknown-protocol", RequestFailure::Refused => "refused", RequestFailure::Obsolete => "obsolete", - RequestFailure::Network(OutboundFailure::DialFailure) => { - "dial-failure" - }, + RequestFailure::Network(OutboundFailure::DialFailure) => + "dial-failure", RequestFailure::Network(OutboundFailure::Timeout) => "timeout", - RequestFailure::Network(OutboundFailure::ConnectionClosed) => { - "connection-closed" - }, + RequestFailure::Network(OutboundFailure::ConnectionClosed) => + "connection-closed", RequestFailure::Network( OutboundFailure::UnsupportedProtocols, ) => "unsupported", @@ -1536,16 +1526,14 @@ where .inc(); }, } - } - }, + }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::ReputationChanges { peer, changes, - })) => { + })) => for change in changes { this.network_service.behaviour().user_protocol().report_peer(peer, change); - } - }, + }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::PeerIdentify { peer_id, info: @@ -1581,11 +1569,10 @@ where .user_protocol_mut() .add_default_set_discovered_nodes(iter::once(peer_id)); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted)) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted)) => if let Some(metrics) = this.metrics.as_ref() { metrics.kademlia_random_queries_total.inc(); - } - }, + }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { remote, protocol, @@ -1807,15 +1794,14 @@ where let reason = match error { DialError::ConnectionLimit(_) => Some("limit-reached"), DialError::InvalidPeerId(_) => Some("invalid-peer-id"), - DialError::Transport(_) | DialError::ConnectionIo(_) => { - Some("transport-error") - }, - DialError::Banned - | DialError::LocalPeerId - | DialError::NoAddresses - | DialError::DialPeerConditionFalse(_) - | DialError::WrongPeerId { .. 
} - | DialError::Aborted => None, // ignore them + DialError::Transport(_) | DialError::ConnectionIo(_) => + Some("transport-error"), + DialError::Banned | + DialError::LocalPeerId | + DialError::NoAddresses | + DialError::DialPeerConditionFalse(_) | + DialError::WrongPeerId { .. } | + DialError::Aborted => None, // ignore them }; if let Some(reason) = reason { metrics @@ -1849,8 +1835,8 @@ where let reason = match error { PendingConnectionError::ConnectionLimit(_) => Some("limit-reached"), PendingConnectionError::WrongPeerId { .. } => Some("invalid-peer-id"), - PendingConnectionError::Transport(_) - | PendingConnectionError::IO(_) => Some("transport-error"), + PendingConnectionError::Transport(_) | + PendingConnectionError::IO(_) => Some("transport-error"), PendingConnectionError::Aborted => None, // ignore it }; @@ -2027,7 +2013,7 @@ fn ensure_addresses_consistent_with_transport<'a>( return Err(Error::AddressesForAnotherTransport { transport: transport.clone(), addresses, - }); + }) } } else { let addresses: Vec<_> = addresses @@ -2039,7 +2025,7 @@ fn ensure_addresses_consistent_with_transport<'a>( return Err(Error::AddressesForAnotherTransport { transport: transport.clone(), addresses, - }); + }) } } diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index 7a8012cfd04cf..4144d7f19551e 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -261,7 +261,7 @@ impl Metrics { .inc_by(num); }); }, - Event::NotificationsReceived { messages, .. } => { + Event::NotificationsReceived { messages, .. 
} => for (protocol, message) in messages { format_label("notif-", protocol, |protocol_label| { self.events_total @@ -271,8 +271,7 @@ impl Metrics { self.notifications_sizes.with_label_values(&[protocol, "sent", name]).inc_by( num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::MAX)), ); - } - }, + }, } } @@ -299,7 +298,7 @@ impl Metrics { self.events_total.with_label_values(&[protocol_label, "received", name]).inc(); }); }, - Event::NotificationsReceived { messages, .. } => { + Event::NotificationsReceived { messages, .. } => for (protocol, message) in messages { format_label("notif-", protocol, |protocol_label| { self.events_total @@ -309,8 +308,7 @@ impl Metrics { self.notifications_sizes .with_label_values(&[protocol, "received", name]) .inc_by(u64::try_from(message.len()).unwrap_or(u64::MAX)); - } - }, + }, } } } diff --git a/client/network/src/service/tests/service.rs b/client/network/src/service/tests/service.rs index 4005bc3a78429..90945fdcef2cf 100644 --- a/client/network/src/service/tests/service.rs +++ b/client/network/src/service/tests/service.rs @@ -101,7 +101,7 @@ fn notifications_state_consistent() { iterations += 1; if iterations >= 1_000 { assert!(something_happened); - break; + break } // Start by sending a notification from node1 to node2 and vice-versa. Part of the @@ -137,12 +137,10 @@ fn notifications_state_consistent() { // forever while nothing at all happens on the network. 
let continue_test = futures_timer::Delay::new(Duration::from_millis(20)); match future::select(future::select(next1, next2), continue_test).await { - future::Either::Left((future::Either::Left((Some(ev), _)), _)) => { - future::Either::Left(ev) - }, - future::Either::Left((future::Either::Right((Some(ev), _)), _)) => { - future::Either::Right(ev) - }, + future::Either::Left((future::Either::Left((Some(ev), _)), _)) => + future::Either::Left(ev), + future::Either::Left((future::Either::Right((Some(ev), _)), _)) => + future::Either::Right(ev), future::Either::Right(_) => continue, _ => break, } @@ -151,42 +149,38 @@ fn notifications_state_consistent() { match next_event { future::Either::Left(Event::NotificationStreamOpened { remote, protocol, .. - }) => { + }) => if protocol == PROTOCOL_NAME.into() { something_happened = true; assert!(!node1_to_node2_open); node1_to_node2_open = true; assert_eq!(remote, node2.local_peer_id()); - } - }, + }, future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. - }) => { + }) => if protocol == PROTOCOL_NAME.into() { something_happened = true; assert!(!node2_to_node1_open); node2_to_node1_open = true; assert_eq!(remote, node1.local_peer_id()); - } - }, + }, future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. - }) => { + }) => if protocol == PROTOCOL_NAME.into() { assert!(node1_to_node2_open); node1_to_node2_open = false; assert_eq!(remote, node2.local_peer_id()); - } - }, + }, future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. - }) => { + }) => if protocol == PROTOCOL_NAME.into() { assert!(node2_to_node1_open); node2_to_node1_open = false; assert_eq!(remote, node1.local_peer_id()); - } - }, + }, future::Either::Left(Event::NotificationsReceived { remote, .. 
}) => { assert!(node1_to_node2_open); assert_eq!(remote, node2.local_peer_id()); @@ -302,13 +296,12 @@ fn notifications_back_pressure() { while received_notifications < TOTAL_NOTIFS { match events_stream2.next().await.unwrap() { Event::NotificationStreamClosed { .. } => panic!(), - Event::NotificationsReceived { messages, .. } => { + Event::NotificationsReceived { messages, .. } => for message in messages { assert_eq!(message.0, PROTOCOL_NAME.into()); assert_eq!(message.1, format!("hello #{}", received_notifications)); received_notifications += 1; - } - }, + }, _ => {}, }; @@ -383,7 +376,7 @@ fn fallback_name_working() { Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } => { assert_eq!(protocol, PROTOCOL_NAME.into()); assert_eq!(negotiated_fallback, None); - break; + break }, _ => {}, }; @@ -398,7 +391,7 @@ fn fallback_name_working() { if protocol == NEW_PROTOCOL_NAME.into() => { assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME.into())); - break; + break }, _ => {}, }; diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index 2e3d0218ce153..b5f8b6b73bce9 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -365,12 +365,11 @@ where let body = if get_body { match self.client.block_body(hash)? 
{ - Some(mut extrinsics) => { - extrinsics.iter_mut().map(|extrinsic| extrinsic.encode()).collect() - }, + Some(mut extrinsics) => + extrinsics.iter_mut().map(|extrinsic| extrinsic.encode()).collect(), None => { log::trace!(target: LOG_TARGET, "Missing data for block request."); - break; + break }, } } else { @@ -407,13 +406,13 @@ where indexed_body, }; - let new_total_size = total_size - + block_data.body.iter().map(|ex| ex.len()).sum::() - + block_data.indexed_body.iter().map(|ex| ex.len()).sum::(); + let new_total_size = total_size + + block_data.body.iter().map(|ex| ex.len()).sum::() + + block_data.indexed_body.iter().map(|ex| ex.len()).sum::(); // Send at least one block, but make sure to not exceed the limit. if !blocks.is_empty() && new_total_size > MAX_BODY_BYTES { - break; + break } total_size = new_total_size; @@ -421,14 +420,14 @@ where blocks.push(block_data); if blocks.len() >= max_blocks as usize { - break; + break } match direction { Direction::Ascending => block_id = BlockId::Number(number + One::one()), Direction::Descending => { if number.is_zero() { - break; + break } block_id = BlockId::Hash(parent_hash) }, diff --git a/client/network/sync/src/blocks.rs b/client/network/sync/src/blocks.rs index 945e746bf4604..b8acd61a2009f 100644 --- a/client/network/sync/src/blocks.rs +++ b/client/network/sync/src/blocks.rs @@ -82,7 +82,7 @@ impl BlockCollection { /// Insert a set of blocks into collection. 
pub fn insert(&mut self, start: NumberFor, blocks: Vec>, who: PeerId) { if blocks.is_empty() { - return; + return } match self.blocks.get(&start) { @@ -91,7 +91,7 @@ impl BlockCollection { }, Some(&BlockRangeState::Complete(ref existing)) if existing.len() >= blocks.len() => { trace!(target: "sync", "Ignored block data already downloaded: {}", start); - return; + return }, _ => (), } @@ -117,7 +117,7 @@ impl BlockCollection { ) -> Option>> { if peer_best <= common { // Bail out early - return None; + return None } // First block number that we need to download let first_different = common + >::one(); @@ -130,28 +130,24 @@ impl BlockCollection { break match (prev, next) { (Some((start, &BlockRangeState::Downloading { ref len, downloading })), _) if downloading < max_parallel => - { - (*start..*start + *len, downloading) - }, - (Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start => { - (*start + r.len()..cmp::min(*next_start, *start + r.len() + count), 0) - }, // gap + (*start..*start + *len, downloading), + (Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start => + (*start + r.len()..cmp::min(*next_start, *start + r.len() + count), 0), // gap (Some((start, r)), None) => (*start + r.len()..*start + r.len() + count, 0), /* last range */ (None, None) => (first_different..first_different + count, 0), /* empty */ - (None, Some((start, _))) if *start > first_different => { - (first_different..cmp::min(first_different + count, *start), 0) - }, /* gap at the start */ + (None, Some((start, _))) if *start > first_different => + (first_different..cmp::min(first_different + count, *start), 0), /* gap at the start */ _ => { prev = next; - continue; + continue }, - }; + } } }; // crop to peers best if range.start > peer_best { trace!(target: "sync", "Out of range for peer {} ({} vs {})", who, range.start, peer_best); - return None; + return None } range.end = cmp::min(peer_best + One::one(), range.end); @@ -162,7 +158,7 @@ impl 
BlockCollection { .map_or(false, |(n, _)| range.start > *n + max_ahead.into()) { trace!(target: "sync", "Too far ahead for peer {} ({})", who, range.start); - return None; + return None } self.peer_requests.insert(who, range.start); @@ -193,7 +189,7 @@ impl BlockCollection { let mut prev = from; for (&start, range_data) in &mut self.blocks { if start > prev { - break; + break } let len = match range_data { BlockRangeState::Complete(blocks) => { diff --git a/client/network/sync/src/extra_requests.rs b/client/network/sync/src/extra_requests.rs index 2911768a198eb..0506bd542ff3b 100644 --- a/client/network/sync/src/extra_requests.rs +++ b/client/network/sync/src/extra_requests.rs @@ -130,7 +130,7 @@ impl ExtraRequests { ); self.importing_requests.insert(request); - return Some((who, request.0, request.1, r)); + return Some((who, request.0, request.1, r)) } else { trace!(target: "sync", "Empty {} response from {:?} for {:?}", @@ -161,7 +161,7 @@ impl ExtraRequests { let request = (*best_finalized_hash, best_finalized_number); if self.try_finalize_root::<()>(request, Ok(request), false) { - return Ok(()); + return Ok(()) } if best_finalized_number > self.best_seen_finalized_number { @@ -201,7 +201,7 @@ impl ExtraRequests { reschedule_on_failure: bool, ) -> bool { if !self.importing_requests.remove(&request) { - return false; + return false } let (finalized_hash, finalized_number) = match result { @@ -210,7 +210,7 @@ impl ExtraRequests { if reschedule_on_failure { self.pending_requests.push_front(request); } - return true; + return true }, }; @@ -219,7 +219,7 @@ impl ExtraRequests { "‼️ Imported {:?} {:?} which isn't a root in the tree: {:?}", finalized_hash, finalized_number, self.tree.roots().collect::>() ); - return true; + return true } self.failed_requests.clear(); @@ -287,7 +287,7 @@ impl<'a, B: BlockT> Matcher<'a, B> { peers: &HashMap>, ) -> Option<(PeerId, ExtraRequest)> { if self.remaining == 0 { - return None; + return None } // clean up previously failed 
requests so we can retry again @@ -302,11 +302,11 @@ impl<'a, B: BlockT> Matcher<'a, B> { // only ask peers that have synced at least up to the block number that we're asking // the extra for if sync.best_number < request.1 { - continue; + continue } // don't request to any peers that already have pending requests if self.extras.active_requests.contains_key(peer) { - continue; + continue } // only ask if the same request has not failed for this peer before if self @@ -316,7 +316,7 @@ impl<'a, B: BlockT> Matcher<'a, B> { .map(|rr| rr.iter().any(|i| &i.0 == peer)) .unwrap_or(false) { - continue; + continue } self.extras.active_requests.insert(*peer, request); @@ -325,14 +325,14 @@ impl<'a, B: BlockT> Matcher<'a, B> { self.extras.request_type_name, peer, request, ); - return Some((*peer, request)); + return Some((*peer, request)) } self.extras.pending_requests.push_back(request); self.remaining -= 1; if self.remaining == 0 { - break; + break } } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 2b54ab834cbf3..697445334a073 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -477,9 +477,8 @@ where phase: WarpSyncPhase::DownloadingBlocks(gap_sync.best_queued_number), total_bytes: 0, }), - (None, SyncMode::Warp, _) => { - Some(WarpSyncProgress { phase: WarpSyncPhase::AwaitingPeers, total_bytes: 0 }) - }, + (None, SyncMode::Warp, _) => + Some(WarpSyncProgress { phase: WarpSyncPhase::AwaitingPeers, total_bytes: 0 }), (Some(sync), _, _) => Some(sync.progress()), _ => None, }; @@ -532,7 +531,7 @@ where Ok(BlockStatus::Unknown) => { if best_number.is_zero() { info!("💔 New peer with unknown genesis hash {} ({}).", best_hash, best_number); - return Err(BadPeer(who, rep::GENESIS_MISMATCH)); + return Err(BadPeer(who, rep::GENESIS_MISMATCH)) } // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have // enough to do in the import queue that it's not worth kicking off @@ -554,7 +553,7 @@ where 
state: PeerSyncState::Available, }, ); - return Ok(None); + return Ok(None) } // If we are at genesis, just start downloading. @@ -612,9 +611,9 @@ where Ok(req) }, - Ok(BlockStatus::Queued) - | Ok(BlockStatus::InChainWithState) - | Ok(BlockStatus::InChainPruned) => { + Ok(BlockStatus::Queued) | + Ok(BlockStatus::InChainWithState) | + Ok(BlockStatus::InChainPruned) => { debug!( target: "sync", "New peer with known best hash {} ({}).", @@ -679,14 +678,14 @@ where if self.is_known(hash) { debug!(target: "sync", "Refusing to sync known hash {:?}", hash); - return; + return } trace!(target: "sync", "Downloading requested old fork {:?}", hash); for peer_id in &peers { if let Some(peer) = self.peers.get_mut(peer_id) { if let PeerSyncState::AncestorSearch { .. } = peer.state { - continue; + continue } if number > peer.best_number { @@ -770,14 +769,14 @@ where blocks } else { debug!(target: "sync", "Unexpected gap block response from {}", who); - return Err(BadPeer(*who, rep::NO_BLOCK)); + return Err(BadPeer(*who, rep::NO_BLOCK)) } }, PeerSyncState::DownloadingStale(_) => { peer.state = PeerSyncState::Available; if blocks.is_empty() { debug!(target: "sync", "Empty block response from {}", who); - return Err(BadPeer(*who, rep::NO_BLOCK)); + return Err(BadPeer(*who, rep::NO_BLOCK)) } validate_blocks::(&blocks, who, Some(request))?; blocks @@ -819,7 +818,7 @@ where "Invalid response when searching for ancestor from {}", who, ); - return Err(BadPeer(*who, rep::UNKNOWN_ANCESTOR)); + return Err(BadPeer(*who, rep::UNKNOWN_ANCESTOR)) }, (_, Err(e)) => { info!( @@ -827,12 +826,12 @@ where "❌ Error answering legitimate blockchain query: {}", e, ); - return Err(BadPeer(*who, rep::BLOCKCHAIN_READ_ERROR)); + return Err(BadPeer(*who, rep::BLOCKCHAIN_READ_ERROR)) }, }; if matching_hash.is_some() { - if *start < self.best_queued_number - && self.best_queued_number <= peer.best_number + if *start < self.best_queued_number && + self.best_queued_number <= peer.best_number { // We've made 
progress on this chain since the search was started. // Opportunistically set common number to updated number @@ -844,7 +843,7 @@ where } if matching_hash.is_none() && current.is_zero() { trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); - return Err(BadPeer(*who, rep::GENESIS_MISMATCH)); + return Err(BadPeer(*who, rep::GENESIS_MISMATCH)) } if let Some((next_state, next_num)) = handle_ancestor_search_state(state, *current, matching_hash.is_some()) @@ -854,7 +853,7 @@ where start: *start, state: next_state, }; - return Ok(OnBlockData::Request(*who, ancestry_request::(next_num))); + return Ok(OnBlockData::Request(*who, ancestry_request::(next_num))) } else { // Ancestry search is complete. Check if peer is on a stale fork unknown // to us and add it to sync targets if necessary. @@ -868,8 +867,8 @@ where matching_hash, peer.common_number, ); - if peer.common_number < peer.best_number - && peer.best_number < self.best_queued_number + if peer.common_number < peer.best_number && + peer.best_number < self.best_queued_number { trace!( target: "sync", @@ -899,16 +898,14 @@ where match warp_sync.import_target_block( blocks.pop().expect("`blocks` len checked above."), ) { - TargetBlockImportResult::Success => { - return Ok(OnBlockData::Continue) - }, - TargetBlockImportResult::BadResponse => { - return Err(BadPeer(*who, rep::VERIFICATION_FAIL)) - }, + TargetBlockImportResult::Success => + return Ok(OnBlockData::Continue), + TargetBlockImportResult::BadResponse => + return Err(BadPeer(*who, rep::VERIFICATION_FAIL)), } } else if blocks.is_empty() { debug!(target: "sync", "Empty block response from {}", who); - return Err(BadPeer(*who, rep::NO_BLOCK)); + return Err(BadPeer(*who, rep::NO_BLOCK)) } else { debug!( target: "sync", @@ -916,7 +913,7 @@ where blocks.len(), who, ); - return Err(BadPeer(*who, rep::NOT_REQUESTED)); + return Err(BadPeer(*who, rep::NOT_REQUESTED)) } } else { debug!( @@ -924,13 +921,13 @@ where "Logic error: we think we are 
downloading warp target block from {}, but no warp sync is happening.", who, ); - return Ok(OnBlockData::Continue); + return Ok(OnBlockData::Continue) } }, - PeerSyncState::Available - | PeerSyncState::DownloadingJustification(..) - | PeerSyncState::DownloadingState - | PeerSyncState::DownloadingWarpProof => Vec::new(), + PeerSyncState::Available | + PeerSyncState::DownloadingJustification(..) | + PeerSyncState::DownloadingState | + PeerSyncState::DownloadingWarpProof => Vec::new(), } } else { // When request.is_none() this is a block announcement. Just accept blocks. @@ -958,7 +955,7 @@ where } } else { // We don't know of this peer, so we also did not request anything from it. - return Err(BadPeer(*who, rep::NOT_REQUESTED)); + return Err(BadPeer(*who, rep::NOT_REQUESTED)) }; Ok(self.validate_and_queue_blocks(new_blocks, gap)) @@ -973,7 +970,7 @@ where peer } else { error!(target: "sync", "💔 Called on_block_justification with a bad peer ID"); - return Ok(OnBlockJustification::Nothing); + return Ok(OnBlockJustification::Nothing) }; self.allowed_requests.add(&who); @@ -990,7 +987,7 @@ where hash, block.hash, ); - return Err(BadPeer(who, rep::BAD_JUSTIFICATION)); + return Err(BadPeer(who, rep::BAD_JUSTIFICATION)) } block @@ -1012,7 +1009,7 @@ where if let Some((peer, hash, number, j)) = self.extra_justifications.on_response(who, justification) { - return Ok(OnBlockJustification::Import { peer, hash, number, justifications: j }); + return Ok(OnBlockJustification::Import { peer, hash, number, justifications: j }) } } @@ -1039,7 +1036,7 @@ where } for (result, hash) in results { if has_error { - break; + break } if result.is_err() { @@ -1047,11 +1044,10 @@ where } match result { - Ok(BlockImportStatus::ImportedKnown(number, who)) => { + Ok(BlockImportStatus::ImportedKnown(number, who)) => if let Some(peer) = who { self.update_peer_common_number(&peer, number); - } - }, + }, Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { if aux.clear_justification_requests 
{ trace!( @@ -1119,7 +1115,7 @@ where self.gap_sync = None; } }, - Err(BlockImportError::IncompleteHeader(who)) => { + Err(BlockImportError::IncompleteHeader(who)) => if let Some(peer) = who { warn!( target: "sync", @@ -1127,9 +1123,8 @@ where ); output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); output.extend(self.restart()); - } - }, - Err(BlockImportError::VerificationFailed(who, e)) => { + }, + Err(BlockImportError::VerificationFailed(who, e)) => if let Some(peer) = who { warn!( target: "sync", @@ -1140,9 +1135,8 @@ where ); output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); output.extend(self.restart()); - } - }, - Err(BlockImportError::BadBlock(who)) => { + }, + Err(BlockImportError::BadBlock(who)) => if let Some(peer) = who { warn!( target: "sync", @@ -1151,8 +1145,7 @@ where peer, ); output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); - } - }, + }, Err(BlockImportError::MissingState) => { // This may happen if the chain we were requesting upon has been discarded // in the meantime because other chain has been finalized. @@ -1252,7 +1245,7 @@ where } .boxed(), ); - return; + return } // Check if there is a slot for this block announce validation. @@ -1272,7 +1265,7 @@ where } .boxed(), ); - return; + return }, HasSlotForBlockAnnounceValidation::MaximumPeerSlotsReached => { self.block_announce_validation.push(async move { @@ -1285,7 +1278,7 @@ where ); PreValidateBlockAnnounce::Skip }.boxed()); - return; + return }, } @@ -1573,18 +1566,15 @@ where fn required_block_attributes(&self) -> BlockAttributes { match self.mode { - SyncMode::Full => { - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY - }, + SyncMode::Full => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, SyncMode::Light => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, - SyncMode::LightState { storage_chain_mode: false, .. 
} | SyncMode::Warp => { - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY - }, - SyncMode::LightState { storage_chain_mode: true, .. } => { - BlockAttributes::HEADER - | BlockAttributes::JUSTIFICATION - | BlockAttributes::INDEXED_BODY - }, + SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::LightState { storage_chain_mode: true, .. } => + BlockAttributes::HEADER | + BlockAttributes::JUSTIFICATION | + BlockAttributes::INDEXED_BODY, } } @@ -1661,7 +1651,7 @@ where for (n, peer) in self.peers.iter_mut() { if let PeerSyncState::AncestorSearch { .. } = peer.state { // Wait for ancestry search to complete first. - continue; + continue } let new_common_number = if peer.best_number >= number { number } else { peer.best_number }; @@ -1696,7 +1686,7 @@ where peer: &PeerId, ) -> HasSlotForBlockAnnounceValidation { if self.block_announce_validation.len() >= MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS { - return HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached; + return HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached } match self.block_announce_validation_per_peer_stats.entry(*peer) { @@ -1722,9 +1712,9 @@ where res: &PreValidateBlockAnnounce, ) { let peer = match res { - PreValidateBlockAnnounce::Failure { who, .. } - | PreValidateBlockAnnounce::Process { who, .. } - | PreValidateBlockAnnounce::Error { who } => who, + PreValidateBlockAnnounce::Failure { who, .. } | + PreValidateBlockAnnounce::Process { who, .. 
} | + PreValidateBlockAnnounce::Error { who } => who, PreValidateBlockAnnounce::Skip => return, }; @@ -1758,17 +1748,16 @@ where who, disconnect, ); - return PollBlockAnnounceValidation::Failure { who, disconnect }; - }, - PreValidateBlockAnnounce::Process { announce, is_new_best, who } => { - (announce, is_new_best, who) + return PollBlockAnnounceValidation::Failure { who, disconnect } }, + PreValidateBlockAnnounce::Process { announce, is_new_best, who } => + (announce, is_new_best, who), PreValidateBlockAnnounce::Error { .. } | PreValidateBlockAnnounce::Skip => { debug!( target: "sync", "Ignored announce validation", ); - return PollBlockAnnounceValidation::Skip; + return PollBlockAnnounceValidation::Skip }, }; @@ -1792,12 +1781,12 @@ where peer } else { error!(target: "sync", "💔 Called on_block_announce with a bad peer ID"); - return PollBlockAnnounceValidation::Nothing { is_best, who, announce }; + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } }; if let PeerSyncState::AncestorSearch { .. 
} = peer.state { trace!(target: "sync", "Peer state is ancestor search."); - return PollBlockAnnounceValidation::Nothing { is_best, who, announce }; + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } } if is_best { @@ -1811,8 +1800,8 @@ where if is_best { if known && self.best_queued_number >= number { self.update_peer_common_number(&who, number); - } else if announce.header.parent_hash() == &self.best_queued_hash - || known_parent && self.best_queued_number >= number + } else if announce.header.parent_hash() == &self.best_queued_hash || + known_parent && self.best_queued_number >= number { self.update_peer_common_number(&who, number - One::one()); } @@ -1825,7 +1814,7 @@ where if let Some(target) = self.fork_targets.get_mut(&hash) { target.peers.insert(who); } - return PollBlockAnnounceValidation::Nothing { is_best, who, announce }; + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } } if ancient_parent { @@ -1836,7 +1825,7 @@ where hash, announce.header, ); - return PollBlockAnnounceValidation::Nothing { is_best, who, announce }; + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } } let requires_additional_data = self.mode != SyncMode::Light || !known_parent; @@ -1848,7 +1837,7 @@ where hash, announce.header, ); - return PollBlockAnnounceValidation::ImportHeader { is_best, announce, who }; + return PollBlockAnnounceValidation::ImportHeader { is_best, announce, who } } if self.status().state == SyncState::Idle { @@ -1892,7 +1881,7 @@ where // We make sure our commmon number is at least something we have. p.common_number = self.best_queued_number; self.peers.insert(id, p); - return None; + return None } // handle peers that were in other states. @@ -1926,9 +1915,9 @@ where self.best_queued_hash = info.best_hash; self.best_queued_number = info.best_number; - if self.mode == SyncMode::Full - && self.client.block_status(&BlockId::hash(info.best_hash))? 
- != BlockStatus::InChainWithState + if self.mode == SyncMode::Full && + self.client.block_status(&BlockId::hash(info.best_hash))? != + BlockStatus::InChainWithState { self.import_existing = true; // Latest state is missing, start with the last finalized state or genesis instead. @@ -1958,7 +1947,7 @@ where /// What is the status of the block corresponding to the given hash? fn block_status(&self, hash: &B::Hash) -> Result { if self.queue_blocks.contains(hash) { - return Ok(BlockStatus::Queued); + return Ok(BlockStatus::Queued) } self.client.block_status(&BlockId::Hash(*hash)) } @@ -2778,7 +2767,7 @@ fn handle_ancestor_search_state( if block_hash_match && next_distance_to_tip == One::one() { // We found the ancestor in the first step so there is no need to execute binary // search. - return None; + return None } if block_hash_match { let left = curr_block_num; @@ -2797,7 +2786,7 @@ fn handle_ancestor_search_state( }, AncestorSearchState::BinarySearch(mut left, mut right) => { if left >= curr_block_num { - return None; + return None } if block_hash_match { left = curr_block_num; @@ -2827,7 +2816,7 @@ fn peer_block_request( ) -> Option<(Range>, BlockRequest)> { if best_num >= peer.best_number { // Will be downloaded as alternative fork instead. - return None; + return None } else if peer.common_number < finalized { trace!( target: "sync", @@ -2908,22 +2897,22 @@ fn fork_sync_request( targets.retain(|hash, r| { if r.number <= finalized { trace!(target: "sync", "Removed expired fork sync request {:?} (#{})", hash, r.number); - return false; + return false } if check_block(hash) != BlockStatus::Unknown { trace!(target: "sync", "Removed obsolete fork sync request {:?} (#{})", hash, r.number); - return false; + return false } true }); for (hash, r) in targets { if !r.peers.contains(&id) { - continue; + continue } // Download the fork only if it is behind or not too far ahead our tip of the chain // Otherwise it should be downloaded in full sync mode. 
- if r.number <= best_num - || (r.number - best_num).saturated_into::() < MAX_BLOCKS_TO_REQUEST as u32 + if r.number <= best_num || + (r.number - best_num).saturated_into::() < MAX_BLOCKS_TO_REQUEST as u32 { let parent_status = r.parent_hash.as_ref().map_or(BlockStatus::Unknown, check_block); let count = if parent_status == BlockStatus::Unknown { @@ -2942,7 +2931,7 @@ fn fork_sync_request( direction: Direction::Descending, max: Some(count), }, - )); + )) } else { trace!(target: "sync", "Fork too far in the future: {:?} (#{})", hash, r.number); } @@ -2961,7 +2950,7 @@ where T: HeaderMetadata + ?Sized, { if base == block { - return Ok(false); + return Ok(false) } let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?; @@ -2988,7 +2977,7 @@ fn validate_blocks( blocks.len(), ); - return Err(BadPeer(*who, rep::NOT_REQUESTED)); + return Err(BadPeer(*who, rep::NOT_REQUESTED)) } let block_header = @@ -3008,11 +2997,11 @@ fn validate_blocks( block_header, ); - return Err(BadPeer(*who, rep::NOT_REQUESTED)); + return Err(BadPeer(*who, rep::NOT_REQUESTED)) } - if request.fields.contains(BlockAttributes::HEADER) - && blocks.iter().any(|b| b.header.is_none()) + if request.fields.contains(BlockAttributes::HEADER) && + blocks.iter().any(|b| b.header.is_none()) { trace!( target: "sync", @@ -3020,7 +3009,7 @@ fn validate_blocks( who, ); - return Err(BadPeer(*who, rep::BAD_RESPONSE)); + return Err(BadPeer(*who, rep::BAD_RESPONSE)) } if request.fields.contains(BlockAttributes::BODY) && blocks.iter().any(|b| b.body.is_none()) @@ -3031,7 +3020,7 @@ fn validate_blocks( who, ); - return Err(BadPeer(*who, rep::BAD_RESPONSE)); + return Err(BadPeer(*who, rep::BAD_RESPONSE)) } } @@ -3046,7 +3035,7 @@ fn validate_blocks( b.hash, hash, ); - return Err(BadPeer(*who, rep::BAD_BLOCK)); + return Err(BadPeer(*who, rep::BAD_BLOCK)) } } if let (Some(header), Some(body)) = (&b.header, &b.body) { @@ -3064,7 +3053,7 @@ fn validate_blocks( expected, got, ); - return Err(BadPeer(*who, 
rep::BAD_BLOCK)); + return Err(BadPeer(*who, rep::BAD_BLOCK)) } } } @@ -3217,9 +3206,9 @@ mod test { // the justification request should be scheduled to the // new peer which is at the given block assert!(sync.justification_requests().any(|(p, r)| { - p == peer_id3 - && r.fields == BlockAttributes::JUSTIFICATION - && r.from == FromBlock::Hash(b1_hash) + p == peer_id3 && + r.fields == BlockAttributes::JUSTIFICATION && + r.from == FromBlock::Hash(b1_hash) })); assert_eq!( @@ -3264,7 +3253,7 @@ mod test { // Poll until we have procssed the block announcement block_on(poll_fn(|cx| loop { if sync.poll_block_announce_validation(cx).is_pending() { - break Poll::Ready(()); + break Poll::Ready(()) } })) } @@ -3664,7 +3653,7 @@ mod test { request } else { // We found the ancenstor - break; + break }; log::trace!(target: "sync", "Request: {:?}", request); @@ -3802,7 +3791,7 @@ mod test { request } else { // We found the ancenstor - break; + break }; log::trace!(target: "sync", "Request: {:?}", request); diff --git a/client/network/sync/src/state.rs b/client/network/sync/src/state.rs index 35bb92c5c5a45..9f64b52334c8a 100644 --- a/client/network/sync/src/state.rs +++ b/client/network/sync/src/state.rs @@ -90,11 +90,11 @@ where pub fn import(&mut self, response: StateResponse) -> ImportResult { if response.entries.is_empty() && response.proof.is_empty() { debug!(target: "sync", "Bad state response"); - return ImportResult::BadResponse; + return ImportResult::BadResponse } if !self.skip_proof && response.proof.is_empty() { debug!(target: "sync", "Missing proof"); - return ImportResult::BadResponse; + return ImportResult::BadResponse } let complete = if !self.skip_proof { debug!(target: "sync", "Importing state from {} trie nodes", response.proof.len()); @@ -103,7 +103,7 @@ where Ok(proof) => proof, Err(e) => { debug!(target: "sync", "Error decoding proof: {:?}", e); - return ImportResult::BadResponse; + return ImportResult::BadResponse }, }; let (values, completed) = match 
self.client.verify_range_proof( @@ -117,7 +117,7 @@ where "StateResponse failed proof verification: {}", e, ); - return ImportResult::BadResponse; + return ImportResult::BadResponse }, Ok(values) => values, }; diff --git a/client/network/sync/src/warp.rs b/client/network/sync/src/warp.rs index 5a617060f5da2..ab8a7c66b9856 100644 --- a/client/network/sync/src/warp.rs +++ b/client/network/sync/src/warp.rs @@ -131,7 +131,7 @@ where log::debug!(target: "sync", "Unexpected target block response"); TargetBlockImportResult::BadResponse }, - Phase::TargetBlock(header) => { + Phase::TargetBlock(header) => if let Some(block_header) = &block.header { if block_header == header { if block.body.is_some() { @@ -161,8 +161,7 @@ where } else { log::debug!(target: "sync", "Importing target block failed: missing header."); TargetBlockImportResult::BadResponse - } - }, + }, } } @@ -191,8 +190,8 @@ where Phase::TargetBlock(header) => { let request = BlockRequest:: { id: 0, - fields: BlockAttributes::HEADER - | BlockAttributes::BODY | BlockAttributes::JUSTIFICATION, + fields: BlockAttributes::HEADER | + BlockAttributes::BODY | BlockAttributes::JUSTIFICATION, from: FromBlock::Hash(header.hash()), direction: Direction::Ascending, max: Some(1), diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 2cff4115909d6..975d902157310 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -872,12 +872,11 @@ where let (chain_sync, chain_sync_service, block_announce_config) = ChainSync::new( match network_config.sync_mode { SyncMode::Full => sc_network_common::sync::SyncMode::Full, - SyncMode::Fast { skip_proofs, storage_chain_mode } => { + SyncMode::Fast { skip_proofs, storage_chain_mode } => sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode, - } - }, + }, SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, }, client.clone(), @@ -963,10 +962,10 @@ where let mut highest = None; for peer in 
self.peers().iter() { if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { - return Poll::Pending; + return Poll::Pending } if peer.network.num_sync_requests() != 0 { - return Poll::Pending; + return Poll::Pending } match (highest, peer.client.info().best_hash) { (None, b) => highest = Some(b), @@ -985,10 +984,10 @@ where for peer in self.peers().iter() { if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { - return Poll::Pending; + return Poll::Pending } if peer.network.num_sync_requests() != 0 { - return Poll::Pending; + return Poll::Pending } } @@ -1003,7 +1002,7 @@ where let num_peers = self.peers().len(); if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) { - return Poll::Ready(()); + return Poll::Ready(()) } Poll::Pending diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 887cdae42feec..4515677d0b1e0 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -48,7 +48,7 @@ fn sync_peers_works() { net.poll(cx); for peer in 0..3 { if net.peer(peer).num_peers() != 2 { - return Poll::Pending; + return Poll::Pending } } Poll::Ready(()) @@ -74,12 +74,12 @@ fn sync_cycle_from_offline_to_syncing_to_offline() { for peer in 0..3 { // Online if net.peer(peer).is_offline() { - return Poll::Pending; + return Poll::Pending } if peer < 2 { // Major syncing. 
if net.peer(peer).blocks_count() < 100 && !net.peer(peer).is_major_syncing() { - return Poll::Pending; + return Poll::Pending } } } @@ -91,7 +91,7 @@ fn sync_cycle_from_offline_to_syncing_to_offline() { net.poll(cx); for peer in 0..3 { if net.peer(peer).is_major_syncing() { - return Poll::Pending; + return Poll::Pending } } Poll::Ready(()) @@ -274,15 +274,15 @@ fn sync_justifications() { net.poll(cx); for hash in [hashof10, hashof15, hashof20] { - if net.peer(0).client().justifications(hash).unwrap() - != Some(Justifications::from((*b"FRNK", Vec::new()))) + if net.peer(0).client().justifications(hash).unwrap() != + Some(Justifications::from((*b"FRNK", Vec::new()))) { - return Poll::Pending; + return Poll::Pending } - if net.peer(1).client().justifications(hash).unwrap() - != Some(Justifications::from((*b"FRNK", Vec::new()))) + if net.peer(1).client().justifications(hash).unwrap() != + Some(Justifications::from((*b"FRNK", Vec::new()))) { - return Poll::Pending; + return Poll::Pending } } @@ -313,10 +313,10 @@ fn sync_justifications_across_forks() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).client().justifications(f1_best).unwrap() - == Some(Justifications::from((*b"FRNK", Vec::new()))) - && net.peer(1).client().justifications(f1_best).unwrap() - == Some(Justifications::from((*b"FRNK", Vec::new()))) + if net.peer(0).client().justifications(f1_best).unwrap() == + Some(Justifications::from((*b"FRNK", Vec::new()))) && + net.peer(1).client().justifications(f1_best).unwrap() == + Some(Justifications::from((*b"FRNK", Vec::new()))) { Poll::Ready(()) } else { @@ -429,7 +429,7 @@ fn can_sync_small_non_best_forks() { assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { - return Poll::Pending; + return Poll::Pending } Poll::Ready(()) })); @@ -440,7 +440,7 @@ fn can_sync_small_non_best_forks() { 
block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); if net.peer(1).client().header(&BlockId::Hash(another_fork)).unwrap().is_none() { - return Poll::Pending; + return Poll::Pending } Poll::Ready(()) })); @@ -472,7 +472,7 @@ fn can_sync_forks_ahead_of_the_best_chain() { net.poll(cx); if net.peer(1).client().header(&BlockId::Hash(fork_hash)).unwrap().is_none() { - return Poll::Pending; + return Poll::Pending } Poll::Ready(()) })); @@ -526,7 +526,7 @@ fn can_sync_explicit_forks() { assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { - return Poll::Pending; + return Poll::Pending } Poll::Ready(()) })); @@ -877,9 +877,9 @@ fn block_announce_data_is_propagated() { // Wait until peer 1 is connected to both nodes. block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(1).num_peers() == 2 - && net.peer(0).num_peers() == 1 - && net.peer(2).num_peers() == 1 + if net.peer(1).num_peers() == 2 && + net.peer(0).num_peers() == 1 && + net.peer(2).num_peers() == 1 { Poll::Ready(()) } else { @@ -983,10 +983,10 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(1).client().justifications(hashof10).unwrap() - != Some(Justifications::from((*b"FRNK", Vec::new()))) + if net.peer(1).client().justifications(hashof10).unwrap() != + Some(Justifications::from((*b"FRNK", Vec::new()))) { - return Poll::Pending; + return Poll::Pending } Poll::Ready(()) @@ -1010,7 +1010,7 @@ fn syncs_all_forks_from_single_peer() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); if net.peer(1).network().best_seen_block() != Some(12) { - return Poll::Pending; + return Poll::Pending } Poll::Ready(()) })); diff --git a/client/network/transactions/src/lib.rs b/client/network/transactions/src/lib.rs index 0e6287923bf5c..5239a94ef23f3 100644 --- 
a/client/network/transactions/src/lib.rs +++ b/client/network/transactions/src/lib.rs @@ -102,7 +102,7 @@ impl Future for PendingTransaction { let mut this = self.project(); if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { - return Poll::Ready((this.tx_hash.clone(), import_result)); + return Poll::Ready((this.tx_hash.clone(), import_result)) } Poll::Pending @@ -341,7 +341,7 @@ where Event::NotificationsReceived { remote, messages } => { for (protocol, message) in messages { if protocol != self.protocol_name { - continue; + continue } if let Ok(m) = @@ -364,7 +364,7 @@ where // Accept transactions only when node is not major syncing if self.service.is_major_syncing() { trace!(target: "sync", "{} Ignoring transactions while major syncing", who); - return; + return } trace!(target: "sync", "Received {} transactions from {}", transactions.len(), who); @@ -376,7 +376,7 @@ where "Ignoring any further transactions that exceed `MAX_PENDING_TRANSACTIONS`({}) limit", MAX_PENDING_TRANSACTIONS, ); - break; + break } let hash = self.transaction_pool.hash_of(&t); @@ -402,9 +402,8 @@ where fn on_handle_transaction_import(&mut self, who: PeerId, import: TransactionImport) { match import { - TransactionImport::KnownGood => { - self.service.report_peer(who, rep::ANY_TRANSACTION_REFUND) - }, + TransactionImport::KnownGood => + self.service.report_peer(who, rep::ANY_TRANSACTION_REFUND), TransactionImport::NewGood => self.service.report_peer(who, rep::GOOD_TRANSACTION), TransactionImport::Bad => self.service.report_peer(who, rep::BAD_TRANSACTION), TransactionImport::None => {}, @@ -415,7 +414,7 @@ where pub fn propagate_transaction(&mut self, hash: &H) { // Accept transactions only when node is not major syncing if self.service.is_major_syncing() { - return; + return } debug!(target: "sync", "Propagating transaction [{:?}]", hash); @@ -435,7 +434,7 @@ where for (who, peer) in self.peers.iter_mut() { // never send transactions to the light node if 
matches!(peer.role, ObservedRole::Light) { - continue; + continue } let (hashes, to_send): (Vec<_>, Vec<_>) = transactions @@ -467,7 +466,7 @@ where fn propagate_transactions(&mut self) { // Accept transactions only when node is not major syncing if self.service.is_major_syncing() { - return; + return } debug!(target: "sync", "Propagating transactions"); diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index 5978a0583b190..7d3dd8302f343 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -118,9 +118,8 @@ impl offchain::DbExternalities for Db { "CAS", ); match kind { - StorageKind::PERSISTENT => { - self.persistent.compare_and_set(STORAGE_PREFIX, key, old_value, new_value) - }, + StorageKind::PERSISTENT => + self.persistent.compare_and_set(STORAGE_PREFIX, key, old_value, new_value), StorageKind::LOCAL => unavailable_yet(LOCAL_DB), } } diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index 05b05ca3e9a26..4c97e5a47058d 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -156,7 +156,7 @@ impl HttpApi { target: LOG_TARGET, "Overflow in offchain worker HTTP request ID assignment" ); - return Err(()); + return Err(()) }, }; self.requests @@ -219,7 +219,7 @@ impl HttpApi { future::MaybeDone::Done(Err(_)) => return Err(HttpError::IoError), future::MaybeDone::Future(_) | future::MaybeDone::Gone => { debug_assert!(matches!(deadline, future::MaybeDone::Done(..))); - return Err(HttpError::DeadlineReached); + return Err(HttpError::DeadlineReached) }, }; @@ -248,13 +248,13 @@ impl HttpApi { match poll_sender(&mut sender) { Err(HttpError::IoError) => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Encountered io error while trying to add new chunk to body"); - return Err(HttpError::IoError); + return Err(HttpError::IoError) }, other => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, res = ?other, "Added chunk to body"); self.requests .insert(request_id, 
HttpApiRequest::Dispatched(Some(sender))); - return other; + return other }, } } else { @@ -263,7 +263,7 @@ impl HttpApi { // Writing an empty body is a hint that we should stop writing. Dropping // the sender. self.requests.insert(request_id, HttpApiRequest::Dispatched(None)); - return Ok(()); + return Ok(()) } }, @@ -279,13 +279,13 @@ impl HttpApi { ) { Err(HttpError::IoError) => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Encountered io error while trying to add new chunk to body"); - return Err(HttpError::IoError); + return Err(HttpError::IoError) }, other => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, res = ?other, "Added chunk to body"); self.requests .insert(request_id, HttpApiRequest::Response(response)); - return other; + return other }, } } else { @@ -300,7 +300,7 @@ impl HttpApi { ..response }), ); - return Ok(()); + return Ok(()) } }, @@ -309,16 +309,16 @@ impl HttpApi { // If the request has already failed, return without putting back the request // in the list. - return Err(HttpError::IoError); + return Err(HttpError::IoError) }, - v @ HttpApiRequest::Dispatched(None) - | v @ HttpApiRequest::Response(HttpApiRequestRp { sending_body: None, .. }) => { + v @ HttpApiRequest::Dispatched(None) | + v @ HttpApiRequest::Response(HttpApiRequestRp { sending_body: None, .. }) => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Body sending already finished"); // We have already finished sending this body. self.requests.insert(request_id, v); - return Err(HttpError::Invalid); + return Err(HttpError::Invalid) }, } } @@ -335,10 +335,10 @@ impl HttpApi { for id in ids { match self.requests.get_mut(id) { Some(HttpApiRequest::NotDispatched(_, _)) => {}, - Some(HttpApiRequest::Dispatched(sending_body)) - | Some(HttpApiRequest::Response(HttpApiRequestRp { sending_body, .. })) => { + Some(HttpApiRequest::Dispatched(sending_body)) | + Some(HttpApiRequest::Response(HttpApiRequestRp { sending_body, .. 
})) => { let _ = sending_body.take(); - continue; + continue }, _ => continue, }; @@ -403,7 +403,7 @@ impl HttpApi { }, } } - return output; + return output } } @@ -416,7 +416,7 @@ impl HttpApi { msg } else { debug_assert!(matches!(deadline, future::MaybeDone::Done(..))); - continue; + continue } }; @@ -456,7 +456,7 @@ impl HttpApi { None => { tracing::error!(target: "offchain-worker::http", "Worker has crashed"); - return ids.iter().map(|_| HttpRequestStatus::IoError).collect(); + return ids.iter().map(|_| HttpRequestStatus::IoError).collect() }, } } @@ -496,14 +496,14 @@ impl HttpApi { // and we still haven't received a response. Some(rq @ HttpApiRequest::Dispatched(_)) => { self.requests.insert(request_id, rq); - return Err(HttpError::DeadlineReached); + return Err(HttpError::DeadlineReached) }, // The request has failed. Some(HttpApiRequest::Fail { .. }) => return Err(HttpError::IoError), // Request hasn't been dispatched yet; reading the body is invalid. Some(rq @ HttpApiRequest::NotDispatched(_, _)) => { self.requests.insert(request_id, rq); - return Err(HttpError::Invalid); + return Err(HttpError::Invalid) }, None => return Err(HttpError::Invalid), }; @@ -524,12 +524,12 @@ impl HttpApi { ..response }), ); - return Ok(n); + return Ok(n) }, Err(err) => { // This code should never be reached unless there's a logic error somewhere. 
tracing::error!(target: "offchain-worker::http", "Failed to read from current read chunk: {:?}", err); - return Err(HttpError::IoError); + return Err(HttpError::IoError) }, } } @@ -550,7 +550,7 @@ impl HttpApi { if let future::MaybeDone::Done(_) = deadline { self.requests.insert(request_id, HttpApiRequest::Response(response)); - return Err(HttpError::DeadlineReached); + return Err(HttpError::DeadlineReached) } } } @@ -565,9 +565,8 @@ impl fmt::Debug for HttpApi { impl fmt::Debug for HttpApiRequest { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - HttpApiRequest::NotDispatched(_, _) => { - f.debug_tuple("HttpApiRequest::NotDispatched").finish() - }, + HttpApiRequest::NotDispatched(_, _) => + f.debug_tuple("HttpApiRequest::NotDispatched").finish(), HttpApiRequest::Dispatched(_) => f.debug_tuple("HttpApiRequest::Dispatched").finish(), HttpApiRequest::Response(HttpApiRequestRp { status_code, headers, .. }) => f .debug_tuple("HttpApiRequest::Response") @@ -662,12 +661,12 @@ impl Future for HttpWorker { let response = match Future::poll(Pin::new(&mut future), cx) { Poll::Pending => { me.requests.push((id, HttpWorkerRequest::Dispatched(future))); - continue; + continue }, Poll::Ready(Ok(response)) => response, Poll::Ready(Err(error)) => { let _ = me.to_api.unbounded_send(WorkerToApi::Fail { id, error }); - continue; // don't insert the request back + continue // don't insert the request back }, }; @@ -685,7 +684,7 @@ impl Future for HttpWorker { me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx: body_tx })); cx.waker().wake_by_ref(); // reschedule in order to poll the new future - continue; + continue }, HttpWorkerRequest::ReadBody { mut body, mut tx } => { @@ -696,7 +695,7 @@ impl Future for HttpWorker { Poll::Ready(Err(_)) => continue, // don't insert the request back Poll::Pending => { me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); - continue; + continue }, } @@ -745,12 +744,10 @@ impl fmt::Debug for HttpWorker { impl 
fmt::Debug for HttpWorkerRequest { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - HttpWorkerRequest::Dispatched(_) => { - f.debug_tuple("HttpWorkerRequest::Dispatched").finish() - }, - HttpWorkerRequest::ReadBody { .. } => { - f.debug_tuple("HttpWorkerRequest::Response").finish() - }, + HttpWorkerRequest::Dispatched(_) => + f.debug_tuple("HttpWorkerRequest::Dispatched").finish(), + HttpWorkerRequest::ReadBody { .. } => + f.debug_tuple("HttpWorkerRequest::Response").finish(), } } } diff --git a/client/offchain/src/api/timestamp.rs b/client/offchain/src/api/timestamp.rs index ae144f392d3ca..4b3f5efddf275 100644 --- a/client/offchain/src/api/timestamp.rs +++ b/client/offchain/src/api/timestamp.rs @@ -60,9 +60,8 @@ pub fn deadline_to_future( future::maybe_done(match deadline.map(timestamp_from_now) { None => Either::Left(future::pending()), // Only apply delay if we need to wait a non-zero duration - Some(duration) if duration <= Duration::from_secs(0) => { - Either::Right(Either::Left(future::ready(()))) - }, + Some(duration) if duration <= Duration::from_secs(0) => + Either::Right(Either::Left(future::ready(()))), Some(duration) => Either::Right(Either::Right(futures_timer::Delay::new(duration))), }) } diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index 279def1259fc2..ec09835c4898e 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -324,7 +324,7 @@ impl Peerset { fn on_add_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { let newly_inserted = self.reserved_nodes[set_id.0].0.insert(peer_id); if !newly_inserted { - return; + return } self.data.add_no_slot_node(set_id.0, peer_id); @@ -333,14 +333,14 @@ impl Peerset { fn on_remove_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { if !self.reserved_nodes[set_id.0].0.remove(&peer_id) { - return; + return } self.data.remove_no_slot_node(set_id.0, &peer_id); // Nothing more to do if not in reserved-only mode. 
if !self.reserved_nodes[set_id.0].1 { - return; + return } // If, however, the peerset is in reserved-only mode, then the removed node needs to be @@ -384,7 +384,7 @@ impl Peerset { self.data.connected_peers(set_id.0).cloned().collect::>().into_iter() { if self.reserved_nodes[set_id.0].0.contains(&peer_id) { - continue; + continue } let peer = self.data.peer(set_id.0, &peer_id).into_connected().expect( @@ -417,7 +417,7 @@ impl Peerset { fn on_remove_from_peers_set(&mut self, set_id: SetId, peer_id: PeerId) { // Don't do anything if node is reserved. if self.reserved_nodes[set_id.0].0.contains(&peer_id) { - return; + return } match self.data.peer(set_id.0, &peer_id) { @@ -442,7 +442,7 @@ impl Peerset { trace!(target: "peerset", "Report {}: {:+} to {}. Reason: {}", peer_id, change.value, reputation.reputation(), change.reason ); - return; + return } debug!(target: "peerset", "Report {}: {:+} to {}. Reason: {}, Disconnecting", @@ -509,7 +509,7 @@ impl Peerset { peer_reputation.set_reputation(after); if after != 0 { - continue; + continue } drop(peer_reputation); @@ -550,7 +550,7 @@ impl Peerset { // remove that check. If necessary, the peerset should be refactored to give more // control over what happens in that situation. if entry.reputation() < BANNED_THRESHOLD { - break; + break } match entry.try_outgoing() { @@ -574,7 +574,7 @@ impl Peerset { // Nothing more to do if we're in reserved mode. if self.reserved_nodes[set_id.0].1 { - return; + return } // Try to grab the next node to attempt to connect to. @@ -588,7 +588,7 @@ impl Peerset { // Don't connect to nodes with an abysmal reputation. if next.reputation() < BANNED_THRESHOLD { - break; + break } match next.try_outgoing() { @@ -599,7 +599,7 @@ impl Peerset { // This branch can only be entered if there is no free slot, which is // checked above. 
debug_assert!(false); - break; + break }, } } @@ -621,7 +621,7 @@ impl Peerset { if self.reserved_nodes[set_id.0].1 && !self.reserved_nodes[set_id.0].0.contains(&peer_id) { self.message_queue.push_back(Message::Reject(index)); - return; + return } let not_connected = match self.data.peer(set_id.0, &peer_id) { @@ -636,7 +636,7 @@ impl Peerset { if not_connected.reputation() < BANNED_THRESHOLD { self.message_queue.push_back(Message::Reject(index)); - return; + return } match not_connected.try_accept_incoming() { @@ -725,7 +725,7 @@ impl Stream for Peerset { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { loop { if let Some(message) = self.message_queue.pop_front() { - return Poll::Ready(Some(message)); + return Poll::Ready(Some(message)) } if Future::poll(Pin::new(&mut self.next_periodic_alloc_slots), cx).is_ready() { @@ -743,28 +743,21 @@ impl Stream for Peerset { }; match action { - Action::AddReservedPeer(set_id, peer_id) => { - self.on_add_reserved_peer(set_id, peer_id) - }, - Action::RemoveReservedPeer(set_id, peer_id) => { - self.on_remove_reserved_peer(set_id, peer_id) - }, - Action::SetReservedPeers(set_id, peer_ids) => { - self.on_set_reserved_peers(set_id, peer_ids) - }, - Action::SetReservedOnly(set_id, reserved) => { - self.on_set_reserved_only(set_id, reserved) - }, + Action::AddReservedPeer(set_id, peer_id) => + self.on_add_reserved_peer(set_id, peer_id), + Action::RemoveReservedPeer(set_id, peer_id) => + self.on_remove_reserved_peer(set_id, peer_id), + Action::SetReservedPeers(set_id, peer_ids) => + self.on_set_reserved_peers(set_id, peer_ids), + Action::SetReservedOnly(set_id, reserved) => + self.on_set_reserved_only(set_id, reserved), Action::ReportPeer(peer_id, score_diff) => self.on_report_peer(peer_id, score_diff), - Action::AddToPeersSet(sets_name, peer_id) => { - self.add_to_peers_set(sets_name, peer_id) - }, - Action::RemoveFromPeersSet(sets_name, peer_id) => { - self.on_remove_from_peers_set(sets_name, peer_id) - }, - 
Action::PeerReputation(peer_id, pending_response) => { - self.on_peer_reputation(peer_id, pending_response) - }, + Action::AddToPeersSet(sets_name, peer_id) => + self.add_to_peers_set(sets_name, peer_id), + Action::RemoveFromPeersSet(sets_name, peer_id) => + self.on_remove_from_peers_set(sets_name, peer_id), + Action::PeerReputation(peer_id, pending_response) => + self.on_peer_reputation(peer_id, pending_response), } } } diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs index e40364bd5dc0b..c9af5b8e2ccd0 100644 --- a/client/peerset/src/peersstate.rs +++ b/client/peerset/src/peersstate.rs @@ -191,12 +191,10 @@ impl PeersState { assert!(set < self.sets.len()); match self.nodes.get_mut(peer_id).map(|p| &p.sets[set]) { - None | Some(MembershipState::NotMember) => { - Peer::Unknown(UnknownPeer { parent: self, set, peer_id: Cow::Borrowed(peer_id) }) - }, - Some(MembershipState::In) | Some(MembershipState::Out) => { - Peer::Connected(ConnectedPeer { state: self, set, peer_id: Cow::Borrowed(peer_id) }) - }, + None | Some(MembershipState::NotMember) => + Peer::Unknown(UnknownPeer { parent: self, set, peer_id: Cow::Borrowed(peer_id) }), + Some(MembershipState::In) | Some(MembershipState::Out) => + Peer::Connected(ConnectedPeer { state: self, set, peer_id: Cow::Borrowed(peer_id) }), Some(MembershipState::NotConnected { .. 
}) => Peer::NotConnected(NotConnectedPeer { state: self, set, @@ -249,7 +247,7 @@ impl PeersState { .fold(None::<(&PeerId, &mut Node)>, |mut cur_node, to_try| { if let Some(cur_node) = cur_node.take() { if cur_node.1.reputation >= to_try.1.reputation { - return Some(cur_node); + return Some(cur_node) } } Some(to_try) @@ -274,7 +272,7 @@ impl PeersState { pub fn add_no_slot_node(&mut self, set: usize, peer_id: PeerId) { // Reminder: `HashSet::insert` returns false if the node was already in the set if !self.sets[set].no_slot_nodes.insert(peer_id) { - return; + return } if let Some(peer) = self.nodes.get_mut(&peer_id) { @@ -292,7 +290,7 @@ impl PeersState { pub fn remove_no_slot_node(&mut self, set: usize, peer_id: &PeerId) { // Reminder: `HashSet::remove` returns false if the node was already not in the set if !self.sets[set].no_slot_nodes.remove(peer_id) { - return; + return } if let Some(peer) = self.nodes.get_mut(peer_id) { @@ -449,7 +447,7 @@ impl<'a> NotConnectedPeer<'a> { "State inconsistency with {}; not connected after borrow", self.peer_id ); - return Instant::now(); + return Instant::now() }, }; @@ -474,7 +472,7 @@ impl<'a> NotConnectedPeer<'a> { // Note that it is possible for num_out to be strictly superior to the max, in case we were // connected to reserved node then marked them as not reserved. if !self.state.has_free_outgoing_slot(self.set) && !is_no_slot_occupy { - return Err(self); + return Err(self) } if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { @@ -500,10 +498,10 @@ impl<'a> NotConnectedPeer<'a> { // Note that it is possible for num_in to be strictly superior to the max, in case we were // connected to reserved node then marked them as not reserved. 
- if self.state.sets[self.set].num_in >= self.state.sets[self.set].max_in - && !is_no_slot_occupy + if self.state.sets[self.set].num_in >= self.state.sets[self.set].max_in && + !is_no_slot_occupy { - return Err(self); + return Err(self) } if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { @@ -544,8 +542,8 @@ impl<'a> NotConnectedPeer<'a> { peer.sets[self.set] = MembershipState::NotMember; // Remove the peer from `self.state.nodes` entirely if it isn't a member of any set. - if peer.reputation == 0 - && peer.sets.iter().all(|set| matches!(set, MembershipState::NotMember)) + if peer.reputation == 0 && + peer.sets.iter().all(|set| matches!(set, MembershipState::NotMember)) { self.state.nodes.remove(&*self.peer_id); } @@ -616,8 +614,8 @@ impl<'a> Reputation<'a> { impl<'a> Drop for Reputation<'a> { fn drop(&mut self) { if let Some(node) = self.node.take() { - if node.get().reputation == 0 - && node.get().sets.iter().all(|set| matches!(set, MembershipState::NotMember)) + if node.get().reputation == 0 && + node.get().sets.iter().all(|set| matches!(set, MembershipState::NotMember)) { node.remove(); } diff --git a/client/peerset/tests/fuzz.rs b/client/peerset/tests/fuzz.rs index 712822f68cf9e..48c5cb341c35a 100644 --- a/client/peerset/tests/fuzz.rs +++ b/client/peerset/tests/fuzz.rs @@ -119,28 +119,26 @@ fn test_once() { }, // If we generate 2, adjust a random reputation. - 2 => { + 2 => if let Some(id) = known_nodes.iter().choose(&mut rng) { let val = Uniform::new_inclusive(i32::MIN, i32::MAX).sample(&mut rng); peerset_handle.report_peer(*id, ReputationChange::new(val, "")); - } - }, + }, // If we generate 3, disconnect from a random node. - 3 => { + 3 => if let Some(id) = connected_nodes.iter().choose(&mut rng).cloned() { connected_nodes.remove(&id); peerset.dropped(SetId::from(0), id, DropReason::Unknown); - } - }, + }, // If we generate 4, connect to a random node. 
4 => { if let Some(id) = known_nodes .iter() .filter(|n| { - incoming_nodes.values().all(|m| m != *n) - && !connected_nodes.contains(*n) + incoming_nodes.values().all(|m| m != *n) && + !connected_nodes.contains(*n) }) .choose(&mut rng) { @@ -163,12 +161,11 @@ fn test_once() { reserved_nodes.insert(*id); } }, - 8 => { + 8 => if let Some(id) = reserved_nodes.iter().choose(&mut rng).cloned() { reserved_nodes.remove(&id); peerset_handle.remove_reserved_peer(SetId::from(0), id); - } - }, + }, _ => unreachable!(), } diff --git a/client/rpc-api/src/chain/error.rs b/client/rpc-api/src/chain/error.rs index 30a7ad4e34c5b..670e221cf1cde 100644 --- a/client/rpc-api/src/chain/error.rs +++ b/client/rpc-api/src/chain/error.rs @@ -42,9 +42,8 @@ const BASE_ERROR: i32 = 3000; impl From for JsonRpseeError { fn from(e: Error) -> Self { match e { - Error::Other(message) => { - CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, message, None::<()>)).into() - }, + Error::Other(message) => + CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, message, None::<()>)).into(), e => e.into(), } } diff --git a/client/rpc-api/src/dev/error.rs b/client/rpc-api/src/dev/error.rs index 37ea2a94a9b71..43fd3325fa598 100644 --- a/client/rpc-api/src/dev/error.rs +++ b/client/rpc-api/src/dev/error.rs @@ -51,18 +51,14 @@ impl From for JsonRpseeError { let msg = e.to_string(); match e { - Error::BlockQueryError(_) => { - CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, msg, None::<()>)) - }, - Error::BlockExecutionFailed => { - CallError::Custom(ErrorObject::owned(BASE_ERROR + 3, msg, None::<()>)) - }, - Error::WitnessCompactionFailed => { - CallError::Custom(ErrorObject::owned(BASE_ERROR + 4, msg, None::<()>)) - }, - Error::ProofExtractionFailed => { - CallError::Custom(ErrorObject::owned(BASE_ERROR + 5, msg, None::<()>)) - }, + Error::BlockQueryError(_) => + CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, msg, None::<()>)), + Error::BlockExecutionFailed => + 
CallError::Custom(ErrorObject::owned(BASE_ERROR + 3, msg, None::<()>)), + Error::WitnessCompactionFailed => + CallError::Custom(ErrorObject::owned(BASE_ERROR + 4, msg, None::<()>)), + Error::ProofExtractionFailed => + CallError::Custom(ErrorObject::owned(BASE_ERROR + 5, msg, None::<()>)), Error::UnsafeRpcCalled(e) => e.into(), } .into() diff --git a/client/rpc-api/src/state/error.rs b/client/rpc-api/src/state/error.rs index 0a6b6bc889d26..b1df64b4789ab 100644 --- a/client/rpc-api/src/state/error.rs +++ b/client/rpc-api/src/state/error.rs @@ -60,14 +60,12 @@ const BASE_ERROR: i32 = 4000; impl From for JsonRpseeError { fn from(e: Error) -> Self { match e { - Error::InvalidBlockRange { .. } => { + Error::InvalidBlockRange { .. } => CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, e.to_string(), None::<()>)) - .into() - }, - Error::InvalidCount { .. } => { + .into(), + Error::InvalidCount { .. } => CallError::Custom(ErrorObject::owned(BASE_ERROR + 2, e.to_string(), None::<()>)) - .into() - }, + .into(), e => Self::to_call_error(e), } } diff --git a/client/rpc-api/src/system/error.rs b/client/rpc-api/src/system/error.rs index bfefc348f4117..777f8c6c6df0b 100644 --- a/client/rpc-api/src/system/error.rs +++ b/client/rpc-api/src/system/error.rs @@ -48,9 +48,8 @@ const MALFORMATTED_PEER_ARG_ERROR: i32 = BASE_ERROR + 2; impl From for JsonRpseeError { fn from(e: Error) -> Self { match e { - Error::NotHealthy(ref h) => { - CallError::Custom(ErrorObject::owned(NOT_HEALTHY_ERROR, e.to_string(), Some(h))) - }, + Error::NotHealthy(ref h) => + CallError::Custom(ErrorObject::owned(NOT_HEALTHY_ERROR, e.to_string(), Some(h))), Error::MalformattedPeerArg(e) => CallError::Custom(ErrorObject::owned( MALFORMATTED_PEER_ARG_ERROR + 2, e, diff --git a/client/rpc-spec-v2/src/transaction/error.rs b/client/rpc-spec-v2/src/transaction/error.rs index ba7a02928decf..72a5959992f9e 100644 --- a/client/rpc-spec-v2/src/transaction/error.rs +++ b/client/rpc-spec-v2/src/transaction/error.rs @@ 
-41,11 +41,10 @@ impl From for TransactionEvent { Error::Verification(e) => TransactionEvent::Invalid(TransactionError { error: format!("Verification error: {}", e), }), - Error::Pool(PoolError::InvalidTransaction(InvalidTransaction::Custom(e))) => { + Error::Pool(PoolError::InvalidTransaction(InvalidTransaction::Custom(e))) => TransactionEvent::Invalid(TransactionError { error: format!("Invalid transaction with custom error: {}", e), - }) - }, + }), Error::Pool(PoolError::InvalidTransaction(e)) => { let msg: &str = e.into(); TransactionEvent::Invalid(TransactionError { @@ -58,32 +57,28 @@ impl From for TransactionEvent { error: format!("Unknown transaction validity: {}", msg), }) }, - Error::Pool(PoolError::TemporarilyBanned) => { + Error::Pool(PoolError::TemporarilyBanned) => TransactionEvent::Invalid(TransactionError { error: "Transaction is temporarily banned".into(), - }) - }, - Error::Pool(PoolError::AlreadyImported(_)) => { + }), + Error::Pool(PoolError::AlreadyImported(_)) => TransactionEvent::Invalid(TransactionError { error: "Transaction is already imported".into(), - }) - }, - Error::Pool(PoolError::TooLowPriority { old, new }) => { + }), + Error::Pool(PoolError::TooLowPriority { old, new }) => TransactionEvent::Invalid(TransactionError { error: format!( "The priority of the transactin is too low (pool {} > current {})", old, new ), - }) - }, + }), Error::Pool(PoolError::CycleDetected) => TransactionEvent::Invalid(TransactionError { error: "The transaction contains a cyclic dependency".into(), }), - Error::Pool(PoolError::ImmediatelyDropped) => { + Error::Pool(PoolError::ImmediatelyDropped) => TransactionEvent::Invalid(TransactionError { error: "The transaction could not enter the pool because of the limit".into(), - }) - }, + }), Error::Pool(PoolError::Unactionable) => TransactionEvent::Invalid(TransactionError { error: "Transaction cannot be propagated and the local node does not author blocks" .into(), @@ -92,16 +87,14 @@ impl From for 
TransactionEvent { error: "Transaction does not provide any tags, so the pool cannot identify it" .into(), }), - Error::Pool(PoolError::InvalidBlockId(_)) => { + Error::Pool(PoolError::InvalidBlockId(_)) => TransactionEvent::Invalid(TransactionError { error: "The provided block ID is not valid".into(), - }) - }, - Error::Pool(PoolError::RejectedFutureTransaction) => { + }), + Error::Pool(PoolError::RejectedFutureTransaction) => TransactionEvent::Invalid(TransactionError { error: "The pool is not accepting future transactions".into(), - }) - }, + }), } } } diff --git a/client/rpc-spec-v2/src/transaction/event.rs b/client/rpc-spec-v2/src/transaction/event.rs index 7a9710aaea79a..3c75eaff10fd4 100644 --- a/client/rpc-spec-v2/src/transaction/event.rs +++ b/client/rpc-spec-v2/src/transaction/event.rs @@ -186,27 +186,20 @@ enum TransactionEventIR { impl From> for TransactionEventIR { fn from(value: TransactionEvent) -> Self { match value { - TransactionEvent::Validated => { - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Validated) - }, - TransactionEvent::Broadcasted(event) => { - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Broadcasted(event)) - }, - TransactionEvent::BestChainBlockIncluded(event) => { - TransactionEventIR::Block(TransactionEventBlockIR::BestChainBlockIncluded(event)) - }, - TransactionEvent::Finalized(event) => { - TransactionEventIR::Block(TransactionEventBlockIR::Finalized(event)) - }, - TransactionEvent::Error(event) => { - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Error(event)) - }, - TransactionEvent::Invalid(event) => { - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Invalid(event)) - }, - TransactionEvent::Dropped(event) => { - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Dropped(event)) - }, + TransactionEvent::Validated => + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Validated), + TransactionEvent::Broadcasted(event) => + 
TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Broadcasted(event)), + TransactionEvent::BestChainBlockIncluded(event) => + TransactionEventIR::Block(TransactionEventBlockIR::BestChainBlockIncluded(event)), + TransactionEvent::Finalized(event) => + TransactionEventIR::Block(TransactionEventBlockIR::Finalized(event)), + TransactionEvent::Error(event) => + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Error(event)), + TransactionEvent::Invalid(event) => + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Invalid(event)), + TransactionEvent::Dropped(event) => + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Dropped(event)), } } } @@ -216,18 +209,16 @@ impl From> for TransactionEvent { match value { TransactionEventIR::NonBlock(status) => match status { TransactionEventNonBlockIR::Validated => TransactionEvent::Validated, - TransactionEventNonBlockIR::Broadcasted(event) => { - TransactionEvent::Broadcasted(event) - }, + TransactionEventNonBlockIR::Broadcasted(event) => + TransactionEvent::Broadcasted(event), TransactionEventNonBlockIR::Error(event) => TransactionEvent::Error(event), TransactionEventNonBlockIR::Invalid(event) => TransactionEvent::Invalid(event), TransactionEventNonBlockIR::Dropped(event) => TransactionEvent::Dropped(event), }, TransactionEventIR::Block(block) => match block { TransactionEventBlockIR::Finalized(event) => TransactionEvent::Finalized(event), - TransactionEventBlockIR::BestChainBlockIncluded(event) => { - TransactionEvent::BestChainBlockIncluded(event) - }, + TransactionEventBlockIR::BestChainBlockIncluded(event) => + TransactionEvent::BestChainBlockIncluded(event), }, } } diff --git a/client/rpc-spec-v2/src/transaction/transaction.rs b/client/rpc-spec-v2/src/transaction/transaction.rs index ffafc8626b64d..e2cf736dff17a 100644 --- a/client/rpc-spec-v2/src/transaction/transaction.rs +++ b/client/rpc-spec-v2/src/transaction/transaction.rs @@ -102,7 +102,7 @@ where None::<()>, )); let _ = sink.reject(err); 
- return Ok(()); + return Ok(()) }, }; @@ -170,9 +170,8 @@ impl TransactionState { event: TransactionStatus, ) -> Option> { match event { - TransactionStatus::Ready | TransactionStatus::Future => { - Some(TransactionEvent::::Validated) - }, + TransactionStatus::Ready | TransactionStatus::Future => + Some(TransactionEvent::::Validated), TransactionStatus::Broadcast(peers) => { // Set the broadcasted flag once if we submitted the transaction to // at least one peer. @@ -182,22 +181,19 @@ impl TransactionState { num_peers: peers.len(), })) }, - TransactionStatus::InBlock((hash, index)) => { + TransactionStatus::InBlock((hash, index)) => Some(TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { hash, index, - }))) - }, + }))), TransactionStatus::Retracted(_) => Some(TransactionEvent::BestChainBlockIncluded(None)), - TransactionStatus::FinalityTimeout(_) => { + TransactionStatus::FinalityTimeout(_) => Some(TransactionEvent::Dropped(TransactionDropped { broadcasted: self.broadcasted, error: "Maximum number of finality watchers has been reached".into(), - })) - }, - TransactionStatus::Finalized((hash, index)) => { - Some(TransactionEvent::Finalized(TransactionBlock { hash, index })) - }, + })), + TransactionStatus::Finalized((hash, index)) => + Some(TransactionEvent::Finalized(TransactionBlock { hash, index })), TransactionStatus::Usurped(_) => Some(TransactionEvent::Invalid(TransactionError { error: "Extrinsic was rendered invalid by another extrinsic".into(), })), diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index acdff901e6de0..7d0ffdc62e080 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -183,7 +183,7 @@ where Ok(dxt) => dxt, Err(e) => { let _ = sink.reject(JsonRpseeError::from(e)); - return Ok(()); + return Ok(()) }, }; @@ -201,7 +201,7 @@ where Ok(stream) => stream, Err(err) => { let _ = sink.reject(JsonRpseeError::from(err)); - return; + return }, }; diff --git a/client/rpc/src/dev/mod.rs 
b/client/rpc/src/dev/mod.rs index e9273a1b206a2..7f4b68f56f6f6 100644 --- a/client/rpc/src/dev/mod.rs +++ b/client/rpc/src/dev/mod.rs @@ -80,7 +80,7 @@ where header.digest_mut().logs.retain(|item| !matches!(item, DigestItem::Seal(_, _))); Block::new(header, body) } else { - return Ok(None); + return Ok(None) } }; let parent_header = { @@ -92,7 +92,7 @@ where if let Some(header) = parent_header { header } else { - return Ok(None); + return Ok(None) } }; let block_len = block.encoded_size() as u64; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 679c6d48ddc29..7213e4360ae2b 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -240,7 +240,7 @@ where return Err(JsonRpseeError::from(Error::InvalidCount { value: count, max: STORAGE_KEYS_PAGED_MAX_COUNT, - })); + })) } self.backend .storage_keys_paged(block, prefix, count, start_key) @@ -332,7 +332,7 @@ where if keys.is_none() { if let Err(err) = self.deny_unsafe.check_if_safe() { let _ = sink.reject(JsonRpseeError::from(err)); - return Ok(()); + return Ok(()) } } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 0b6b485e71c0a..64b6cacaad700 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -105,7 +105,7 @@ where &from_meta, &to_meta, "from number > to number".to_owned(), - )); + )) } // check if we can get from `to` to `from` by going through parent_hashes. 
@@ -126,7 +126,7 @@ where &from_meta, &to_meta, "from and to are on different forks".to_owned(), - )); + )) } hashes.reverse(); hashes @@ -365,7 +365,7 @@ where Ok(initial) => initial, Err(e) => { let _ = sink.reject(JsonRpseeError::from(e)); - return; + return }, }; @@ -403,7 +403,7 @@ where Ok(stream) => stream, Err(blockchain_err) => { let _ = sink.reject(JsonRpseeError::from(Error::Client(Box::new(blockchain_err)))); - return; + return }, }; @@ -488,9 +488,8 @@ where self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - ChildInfo::new_default(storage_key) - }, + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client @@ -514,9 +513,8 @@ where self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - ChildInfo::new_default(storage_key) - }, + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_keys(block, &child_info, &prefix) @@ -535,9 +533,8 @@ where self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - ChildInfo::new_default(storage_key) - }, + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_keys_iter( @@ -560,9 +557,8 @@ where self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - ChildInfo::new_default(storage_key) - }, + Some((ChildType::ParentKeyId, 
storage_key)) => + ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage(block, &child_info, &key) @@ -581,7 +577,7 @@ where { Arc::new(ChildInfo::new_default(storage_key)) } else { - return Err(client_err(sp_blockchain::Error::InvalidChildStorageKey)); + return Err(client_err(sp_blockchain::Error::InvalidChildStorageKey)) }; let block = self.block_or_best(block).map_err(client_err)?; let client = self.client.clone(); @@ -602,9 +598,8 @@ where self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - ChildInfo::new_default(storage_key) - }, + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_hash(block, &child_info, &key) diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index dc7be4f2b35f1..2f91648008ff7 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -101,17 +101,15 @@ fn api>>(sync: T) -> RpcModule> { Request::NetworkAddReservedPeer(peer, sender) => { let _ = match sc_network_common::config::parse_str_addr(&peer) { Ok(_) => sender.send(Ok(())), - Err(s) => { - sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))) - }, + Err(s) => + sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), }; }, Request::NetworkRemoveReservedPeer(peer, sender) => { let _ = match peer.parse::() { Ok(_) => sender.send(Ok(())), - Err(s) => { - sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))) - }, + Err(s) => + sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), }; }, Request::NetworkReservedPeers(sender) => { @@ -370,7 +368,7 @@ fn test_add_reset_log_filter() { }; futures::executor::block_on(fut).expect("`system_resetLogFilter` failed"); } else if 
line.contains("exit") { - return; + return } log::trace!(target: "test_before_add", "{}", EXPECTED_WITH_TRACE); log::debug!(target: "test_before_add", "{}", EXPECTED_BEFORE_ADD); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 1e434a3662459..63d60fb06f471 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -117,9 +117,8 @@ impl KeystoreContainer { /// Construct KeystoreContainer pub fn new(config: &KeystoreConfig) -> Result { let keystore = Arc::new(match config { - KeystoreConfig::Path { path, password } => { - LocalKeystore::open(path.clone(), password.clone())? - }, + KeystoreConfig::Path { path, password } => + LocalKeystore::open(path.clone(), password.clone())?, KeystoreConfig::InMemory => LocalKeystore::in_memory(), }); @@ -773,7 +772,7 @@ where let mut request_response_protocol_configs = Vec::new(); if warp_sync.is_none() && config.network.sync_mode.is_warp() { - return Err("Warp sync enabled, but no warp sync provider configured.".into()); + return Err("Warp sync enabled, but no warp sync provider configured.".into()) } if client.requires_full_sync() { @@ -798,8 +797,8 @@ where &protocol_id, config.chain_spec.fork_id(), client.clone(), - config.network.default_peers_set.in_peers as usize - + config.network.default_peers_set.out_peers as usize, + config.network.default_peers_set.in_peers as usize + + config.network.default_peers_set.out_peers as usize, ); spawn_handle.spawn("block-request-handler", Some("networking"), handler.run()); protocol_config @@ -850,9 +849,8 @@ where let (chain_sync, chain_sync_service, block_announce_config) = ChainSync::new( match config.network.sync_mode { SyncMode::Full => sc_network_common::sync::SyncMode::Full, - SyncMode::Fast { skip_proofs, storage_chain_mode } => { - sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode } - }, + SyncMode::Fast { skip_proofs, storage_chain_mode } => + sc_network_common::sync::SyncMode::LightState { 
skip_proofs, storage_chain_mode }, SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, }, client.clone(), @@ -976,7 +974,7 @@ where ); // This `return` might seem unnecessary, but we don't want to make it look like // everything is working as normal even though the user is clearly misusing the API. - return; + return } future.await diff --git a/client/service/src/chain_ops/export_blocks.rs b/client/service/src/chain_ops/export_blocks.rs index ec7b6950f1d35..d442a11f2c39b 100644 --- a/client/service/src/chain_ops/export_blocks.rs +++ b/client/service/src/chain_ops/export_blocks.rs @@ -61,7 +61,7 @@ where let client = &client; if last < block { - return Poll::Ready(Err("Invalid block range specified".into())); + return Poll::Ready(Err("Invalid block range specified".into())) } if !wrote_header { @@ -76,14 +76,13 @@ where } match client.block(&BlockId::number(block))? { - Some(block) => { + Some(block) => if binary { output.write_all(&block.encode())?; } else { serde_json::to_writer(&mut output, &block) .map_err(|e| format!("Error writing JSON: {}", e))?; - } - }, + }, // Reached end of the chain. None => return Poll::Ready(Ok(())), } @@ -91,7 +90,7 @@ where info!("#{}", block); } if block == last { - return Poll::Ready(Ok(())); + return Poll::Ready(Ok(())) } block += One::one(); diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs index ea4ed1e9d2c7d..c0612124dd0c2 100644 --- a/client/service/src/chain_ops/import_blocks.rs +++ b/client/service/src/chain_ops/import_blocks.rs @@ -102,8 +102,8 @@ where /// Returns the number of blocks read thus far. fn read_block_count(&self) -> u64 { match self { - BlockIter::Binary { read_block_count, .. } - | BlockIter::Json { read_block_count, .. } => *read_block_count, + BlockIter::Binary { read_block_count, .. } | + BlockIter::Json { read_block_count, .. 
} => *read_block_count, } } @@ -227,8 +227,8 @@ impl Speedometer { let speed = diff .saturating_mul(10_000) .checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) - / 10.0; + .map_or(0.0, |s| s as f64) / + 10.0; info!("📦 Current best block: {} ({:4.1} bps)", self.best_number, speed); } else { // If the number of blocks can't be converted to a regular integer, then we need a more @@ -324,7 +324,7 @@ where if let (Err(err), hash) = result { warn!("There was an error importing block with hash {:?}: {}", hash, err); self.has_error = true; - break; + break } } } @@ -338,7 +338,7 @@ where Err(e) => { // We've encountered an error while creating the block iterator // so we can just return a future that returns an error. - return future::ready(Err(Error::Other(e))).boxed(); + return future::ready(Err(Error::Other(e))).boxed() }, }; @@ -388,12 +388,11 @@ where state = Some(ImportState::Reading { block_iter }); } }, - Err(e) => { + Err(e) => return Poll::Ready(Err(Error::Other(format!( "Error reading block #{}: {}", read_block_count, e - )))) - }, + )))), } }, } @@ -409,7 +408,7 @@ where delay, block, }); - return Poll::Pending; + return Poll::Pending }, Poll::Ready(_) => { delay.reset(Duration::from_millis(DELAY_TIME)); @@ -441,7 +440,7 @@ where read_block_count, client.info().best_number ); - return Poll::Ready(Ok(())); + return Poll::Ready(Ok(())) } else { // Importing is not done, we still have to wait for the queue to finish. // Wait for the delay, because we know the queue is lagging behind. 
@@ -452,7 +451,7 @@ where read_block_count, delay, }); - return Poll::Pending; + return Poll::Pending }, Poll::Ready(_) => { delay.reset(Duration::from_millis(DELAY_TIME)); @@ -477,7 +476,7 @@ where return Poll::Ready(Err(Error::Other(format!( "Stopping after #{} blocks because of an error", link.imported_blocks - )))); + )))) } cx.waker().wake_by_ref(); diff --git a/client/service/src/client/block_rules.rs b/client/service/src/client/block_rules.rs index 1fa4999d59525..2ed27b8fe1b63 100644 --- a/client/service/src/client/block_rules.rs +++ b/client/service/src/client/block_rules.rs @@ -61,12 +61,12 @@ impl BlockRules { pub fn lookup(&self, number: NumberFor, hash: &B::Hash) -> LookupResult { if let Some(hash_for_height) = self.forks.get(&number) { if hash_for_height != hash { - return LookupResult::Expected(*hash_for_height); + return LookupResult::Expected(*hash_for_height) } } if self.bad.contains(hash) { - return LookupResult::KnownBad; + return LookupResult::KnownBad } LookupResult::NotSpecial diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index c56745d48114d..1d896d8acd8bf 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -466,7 +466,7 @@ where } = import_block; if !intermediates.is_empty() { - return Err(Error::IncompletePipeline); + return Err(Error::IncompletePipeline) } let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?; @@ -548,8 +548,8 @@ where { let parent_hash = *import_headers.post().parent_hash(); let status = self.backend.blockchain().status(BlockId::Hash(hash))?; - let parent_exists = self.backend.blockchain().status(BlockId::Hash(parent_hash))? - == blockchain::BlockStatus::InChain; + let parent_exists = self.backend.blockchain().status(BlockId::Hash(parent_hash))? 
== + blockchain::BlockStatus::InChain; match (import_existing, status) { (false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), (false, blockchain::BlockStatus::Unknown) => {}, @@ -566,20 +566,19 @@ where // the block is lower than our last finalized block so it must revert // finality, refusing import. - if status == blockchain::BlockStatus::Unknown - && *import_headers.post().number() <= info.finalized_number - && !gap_block + if status == blockchain::BlockStatus::Unknown && + *import_headers.post().number() <= info.finalized_number && + !gap_block { - return Err(sp_blockchain::Error::NotInFinalizedChain); + return Err(sp_blockchain::Error::NotInFinalizedChain) } // this is a fairly arbitrary choice of where to draw the line on making notifications, // but the general goal is to only make notifications when we are already fully synced // and get a new chain head. let make_notifications = match origin { - BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => { - true - }, + BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => + true, BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, }; @@ -613,14 +612,12 @@ where let storage_key = PrefixedStorageKey::new_ref(&parent_storage); let storage_key = match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - storage_key - }, - None => { + Some((ChildType::ParentKeyId, storage_key)) => + storage_key, + None => return Err(Error::Backend( "Invalid child storage key.".to_string(), - )) - }, + )), }; let entry = storage .children_default @@ -645,7 +642,7 @@ where // State root mismatch when importing state. This should not happen in // safe fast sync mode, but may happen in unsafe mode. 
warn!("Error importing state: State root mismatch."); - return Err(Error::InvalidStateRoot); + return Err(Error::InvalidStateRoot) } None }, @@ -669,12 +666,11 @@ where )?; } - let is_new_best = !gap_block - && (finalized - || match fork_choice { - ForkChoiceStrategy::LongestChain => { - import_headers.post().number() > &info.best_number - }, + let is_new_best = !gap_block && + (finalized || + match fork_choice { + ForkChoiceStrategy::LongestChain => + import_headers.post().number() > &info.best_number, ForkChoiceStrategy::Custom(v) => v, }); @@ -784,21 +780,18 @@ where let at = BlockId::Hash(*parent_hash); let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); let (enact_state, storage_changes) = match (self.block_status(&at)?, state_action) { - (BlockStatus::KnownBad, _) => { - return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)) - }, + (BlockStatus::KnownBad, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), ( BlockStatus::InChainPruned, StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)), ) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (_, StateAction::ApplyChanges(changes)) => (true, Some(changes)), - (BlockStatus::Unknown, _) => { - return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)) - }, + (BlockStatus::Unknown, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), (_, StateAction::Skip) => (false, None), - (BlockStatus::InChainPruned, StateAction::Execute) => { - return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)) - }, + (BlockStatus::InChainPruned, StateAction::Execute) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None), (_, StateAction::Execute) => (true, None), (_, StateAction::ExecuteIfPossible) => (true, None), @@ -827,7 +820,7 @@ 
where if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root { - return Err(Error::InvalidStateRoot); + return Err(Error::InvalidStateRoot) } Some(sc_consensus::StorageChanges::Changes(gen_storage_changes)) }, @@ -856,7 +849,7 @@ where "Possible safety violation: attempted to re-finalize last finalized block {:?} ", last_finalized ); - return Ok(()); + return Ok(()) } let route_from_finalized = @@ -869,7 +862,7 @@ where retracted, last_finalized ); - return Err(sp_blockchain::Error::NotInFinalizedChain); + return Err(sp_blockchain::Error::NotInFinalizedChain) } let route_from_best = @@ -938,7 +931,7 @@ where // since we won't be running the loop below which // would also remove any closed sinks. sinks.retain(|sink| !sink.is_closed()); - return Ok(()); + return Ok(()) }, }; @@ -970,7 +963,7 @@ where // temporary leak of closed/discarded notification sinks (e.g. // from consensus code). self.import_notification_sinks.lock().retain(|sink| !sink.is_closed()); - return Ok(()); + return Ok(()) }, }; @@ -1031,7 +1024,7 @@ where // this can probably be implemented more efficiently if let BlockId::Hash(ref h) = id { if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { - return Ok(BlockStatus::Queued); + return Ok(BlockStatus::Queued) } } let hash_and_number = match *id { @@ -1039,13 +1032,12 @@ where BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), }; match hash_and_number { - Some((hash, number)) => { + Some((hash, number)) => if self.backend.have_state_at(hash, number) { Ok(BlockStatus::InChainWithState) } else { Ok(BlockStatus::InChainPruned) - } - }, + }, None => Ok(BlockStatus::Unknown), } } @@ -1081,7 +1073,7 @@ where let genesis_hash = self.backend.blockchain().info().genesis_hash; if genesis_hash == target_hash { - return Ok(Vec::new()); + return Ok(Vec::new()) } let mut current_hash = target_hash; @@ -1097,7 +1089,7 @@ where current_hash = ancestor_hash; if genesis_hash == 
current_hash { - break; + break } current = ancestor; @@ -1211,15 +1203,14 @@ where size_limit: usize, ) -> sp_blockchain::Result> { if start_key.len() > MAX_NESTED_TRIE_DEPTH { - return Err(Error::Backend("Invalid start key.".to_string())); + return Err(Error::Backend("Invalid start key.".to_string())) } let state = self.state_at(hash)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(storage_key); match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - Ok(ChildInfo::new_default(storage_key)) - }, + Some((ChildType::ParentKeyId, storage_key)) => + Ok(ChildInfo::new_default(storage_key)), None => Err(Error::Backend("Invalid child storage key.".to_string())), } }; @@ -1231,7 +1222,7 @@ where { Some((child_info(start_key)?, child_root)) } else { - return Err(Error::Backend("Invalid root start key.".to_string())); + return Err(Error::Backend("Invalid root start key.".to_string())) } } else { None @@ -1275,18 +1266,18 @@ where let size = value.len() + next_key.len(); if total_size + size > size_limit && !entries.is_empty() { complete = false; - break; + break } total_size += size; - if current_child.is_none() - && sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) - && !child_roots.contains(value.as_slice()) + if current_child.is_none() && + sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) && + !child_roots.contains(value.as_slice()) { child_roots.insert(value.clone()); switch_child_key = Some((next_key.clone(), value.clone())); entries.push((next_key.clone(), value)); - break; + break } entries.push((next_key.clone(), value)); current_key = next_key; @@ -1306,12 +1297,12 @@ where complete, )); if !complete { - break; + break } } else { result[0].0.key_values.extend(entries.into_iter()); result[0].1 = complete; - break; + break } } Ok(result) @@ -1768,7 +1759,7 @@ where match self.block_rules.lookup(number, 
&hash) { BlockLookupResult::KnownBad => { trace!("Rejecting known bad block: #{} {:?}", number, hash); - return Ok(ImportResult::KnownBad); + return Ok(ImportResult::KnownBad) }, BlockLookupResult::Expected(expected_hash) => { trace!( @@ -1777,7 +1768,7 @@ where expected_hash, number ); - return Ok(ImportResult::KnownBad); + return Ok(ImportResult::KnownBad) }, BlockLookupResult::NotSpecial => {}, } @@ -1788,12 +1779,10 @@ where .block_status(&BlockId::Hash(hash)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? { - BlockStatus::InChainWithState | BlockStatus::Queued => { - return Ok(ImportResult::AlreadyInChain) - }, - BlockStatus::InChainPruned if !import_existing => { - return Ok(ImportResult::AlreadyInChain) - }, + BlockStatus::InChainWithState | BlockStatus::Queued => + return Ok(ImportResult::AlreadyInChain), + BlockStatus::InChainPruned if !import_existing => + return Ok(ImportResult::AlreadyInChain), BlockStatus::InChainPruned => {}, BlockStatus::Unknown => {}, BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), @@ -1960,9 +1949,8 @@ where Some(header) => { let hash = header.hash(); match (self.body(hash)?, self.justifications(hash)?) 
{ - (Some(extrinsics), justifications) => { - Some(SignedBlock { block: Block::new(header, extrinsics), justifications }) - }, + (Some(extrinsics), justifications) => + Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), _ => None, } }, diff --git a/client/service/src/client/wasm_override.rs b/client/service/src/client/wasm_override.rs index 9691ef051378d..5fc748f3e88b9 100644 --- a/client/service/src/client/wasm_override.rs +++ b/client/service/src/client/wasm_override.rs @@ -178,7 +178,7 @@ impl WasmOverride { }; if !dir.is_dir() { - return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()); + return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()) } let mut overrides = HashMap::new(); @@ -214,7 +214,7 @@ impl WasmOverride { } if !duplicates.is_empty() { - return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()); + return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()) } Ok(overrides) diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index f593af659d756..091b4bbe9fe5f 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -383,9 +383,8 @@ where match tokio::task::block_in_place(|| { config.tokio_handle.block_on(futures::future::try_join(http_fut, ws_fut)) }) { - Ok((http, ws)) => { - Ok(Box::new((waiting::HttpServer(Some(http)), waiting::WsServer(Some(ws))))) - }, + Ok((http, ws)) => + Ok(Box::new((waiting::HttpServer(Some(http)), waiting::WsServer(Some(ws))))), Err(e) => Err(Error::Application(e)), } } @@ -445,7 +444,7 @@ where Ok(uxt) => uxt, Err(e) => { debug!("Transaction invalid: {:?}", e); - return Box::pin(futures::future::ready(TransactionImport::Bad)); + return Box::pin(futures::future::ready(TransactionImport::Bad)) }, }; @@ -460,9 +459,8 @@ where match import_future.await { Ok(_) => TransactionImport::NewGood, Err(e) => match e.into_pool_error() { - Ok(sc_transaction_pool_api::error::Error::AlreadyImported(_)) => { - TransactionImport::KnownGood - 
}, + Ok(sc_transaction_pool_api::error::Error::AlreadyImported(_)) => + TransactionImport::KnownGood, Ok(e) => { debug!("Error adding transaction to the pool: {:?}", e); TransactionImport::Bad diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 72743fc2c43e1..5d29d34a3cbf2 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -175,7 +175,7 @@ where .iter() .all(|&(ref id, ref service, _, _)| full_predicate(*id, service)) { - break; + break } } }; diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index cde4b8d6d9f34..7dde4a9a426e9 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -308,9 +308,8 @@ impl let non_canonical: NonCanonicalOverlay = NonCanonicalOverlay::new(&db)?; let pruning: Option> = match mode { PruningMode::Constrained(Constraints { max_mem: Some(_), .. }) => unimplemented!(), - PruningMode::Constrained(Constraints { max_blocks, .. }) => { - Some(RefWindow::new(db, max_blocks.unwrap_or(0), ref_counting)?) - }, + PruningMode::Constrained(Constraints { max_blocks, .. 
}) => + Some(RefWindow::new(db, max_blocks.unwrap_or(0), ref_counting)?), PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None, }; @@ -344,7 +343,7 @@ impl // the database atomically to keep their consistency when restarting the node let mut commit = CommitSet::default(); if self.mode == PruningMode::ArchiveAll { - return Ok(commit); + return Ok(commit) } let number = self.non_canonical.canonicalize(hash, &mut commit)?; if self.mode == PruningMode::ArchiveCanonical { @@ -391,22 +390,21 @@ impl { loop { if pruning.window_size() <= constraints.max_blocks.unwrap_or(0) as u64 { - break; + break } if constraints.max_mem.map_or(false, |m| pruning.mem_used() > m) { - break; + break } let pinned = &self.pinned; match pruning.next_hash() { // the block record is temporary unavailable, break and try next time Err(Error::StateDb(StateDbError::BlockUnavailable)) => break, - res => { + res => if res?.map_or(false, |h| pinned.contains_key(&h)) { - break; - } - }, + break + }, } match pruning.prune_one(commit) { // this branch should not reach as previous `next_hash` don't return error @@ -425,18 +423,16 @@ impl fn revert_one(&mut self) -> Option> { match self.mode { PruningMode::ArchiveAll => Some(CommitSet::default()), - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - self.non_canonical.revert_one() - }, + PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => + self.non_canonical.revert_one(), } } fn remove(&mut self, hash: &BlockHash) -> Option> { match self.mode { PruningMode::ArchiveAll => Some(CommitSet::default()), - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - self.non_canonical.remove(hash) - }, + PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => + self.non_canonical.remove(hash), } } @@ -447,8 +443,8 @@ impl match self.mode { PruningMode::ArchiveAll => Ok(()), PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - let have_block = self.non_canonical.have_block(hash) - || 
self.pruning.as_ref().map_or(false, |pruning| { + let have_block = self.non_canonical.have_block(hash) || + self.pruning.as_ref().map_or(false, |pruning| { match pruning.have_block(hash, number) { HaveBlock::No => false, HaveBlock::Yes => true, @@ -497,7 +493,7 @@ impl Q: std::hash::Hash + Eq, { if let Some(value) = self.non_canonical.get(key) { - return Ok(Some(value)); + return Ok(Some(value)) } db.get(key.as_ref()).map_err(Error::Db) } @@ -535,12 +531,11 @@ impl requested_mode.unwrap_or_default() }, - (false, None, _) => { + (false, None, _) => return Err(StateDbError::Metadata( "An existing StateDb does not have PRUNING_MODE stored in its meta-data".into(), ) - .into()) - }, + .into()), (false, Some(stored), None) => stored, @@ -629,12 +624,12 @@ impl /// Returns last finalized block number. pub fn best_canonical(&self) -> Option { - return self.db.read().best_canonical(); + return self.db.read().best_canonical() } /// Check if block is pruned away. pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> IsPruned { - return self.db.read().is_pruned(hash, number); + return self.db.read().is_pruned(hash, number) } /// Reset in-memory changes to the last disk-backed state. 
@@ -684,12 +679,10 @@ fn choose_pruning_mode( ) -> Result { match (stored, requested) { (PruningMode::ArchiveAll, PruningMode::ArchiveAll) => Ok(PruningMode::ArchiveAll), - (PruningMode::ArchiveCanonical, PruningMode::ArchiveCanonical) => { - Ok(PruningMode::ArchiveCanonical) - }, - (PruningMode::Constrained(_), PruningMode::Constrained(requested)) => { - Ok(PruningMode::Constrained(requested)) - }, + (PruningMode::ArchiveCanonical, PruningMode::ArchiveCanonical) => + Ok(PruningMode::ArchiveCanonical), + (PruningMode::Constrained(_), PruningMode::Constrained(requested)) => + Ok(PruningMode::Constrained(requested)), (stored, requested) => Err(StateDbError::IncompatiblePruningModes { requested, stored }), } } diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 6d35ab8bf0dc1..3711cf7a42667 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -138,8 +138,8 @@ fn discard_descendants( while let Some(i) = level.blocks.iter().position(|overlay| { parents .get(&overlay.hash) - .expect("there is a parent entry for each entry in levels; qed") - == hash + .expect("there is a parent entry for each entry in levels; qed") == + hash }) { let overlay = level.remove(i); let mut num_pinned = discard_descendants( @@ -214,7 +214,7 @@ impl NonCanonicalOverlay { } } if level.blocks.is_empty() { - break; + break } levels.push_back(level); block += 1; @@ -258,7 +258,7 @@ impl NonCanonicalOverlay { front_block_number, front_block_number + self.levels.len() as u64, ); - return Err(StateDbError::InvalidBlockNumber); + return Err(StateDbError::InvalidBlockNumber) } // check for valid parent if inserting on second level or higher if number == front_block_number { @@ -267,14 +267,14 @@ impl NonCanonicalOverlay { .as_ref() .map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) { - return Err(StateDbError::InvalidParent); + return Err(StateDbError::InvalidParent) } } else if 
!self.parents.contains_key(parent_hash) { - return Err(StateDbError::InvalidParent); + return Err(StateDbError::InvalidParent) } } - let level = if self.levels.is_empty() - || number == front_block_number + self.levels.len() as u64 + let level = if self.levels.is_empty() || + number == front_block_number + self.levels.len() as u64 { self.levels.push_back(OverlayLevel::new()); self.levels.back_mut().expect("can't be empty after insertion; qed") @@ -284,10 +284,10 @@ impl NonCanonicalOverlay { }; if level.blocks.len() >= MAX_BLOCKS_PER_LEVEL as usize { - return Err(StateDbError::TooManySiblingBlocks); + return Err(StateDbError::TooManySiblingBlocks) } if level.blocks.iter().any(|b| b.hash == *hash) { - return Err(StateDbError::BlockAlreadyExists); + return Err(StateDbError::BlockAlreadyExists) } let index = level.available_index(); @@ -472,13 +472,13 @@ impl NonCanonicalOverlay { // Check that it does not have any children if (level_index != level_count - 1) && self.parents.values().any(|h| h == hash) { log::debug!(target: "state-db", "Trying to remove block {:?} with children", hash); - return None; + return None } let overlay = level.remove(index); commit.meta.deleted.push(overlay.journal_key); self.parents.remove(&overlay.hash); discard_values(&mut self.values, overlay.inserted); - break; + break } if self.levels.back().map_or(false, |l| l.blocks.is_empty()) { self.levels.pop_back(); @@ -548,8 +548,8 @@ mod tests { use sp_core::H256; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(&H256::from_low_u64_be(key)) - == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) + overlay.get(&H256::from_low_u64_be(key)) == + Some(H256::from_low_u64_be(key).as_bytes().to_vec()) } #[test] diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 9f5a2609b8f2c..458522b8119fd 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -358,7 +358,7 @@ impl RefWindow { // if the queue is empty or the 
block number exceed the pruning window, we definitely // do not have this block if self.is_empty() || number < self.base || number >= self.base + self.window_size() { - return HaveBlock::No; + return HaveBlock::No } self.queue.have_block(hash, (number - self.base) as usize) } @@ -390,7 +390,7 @@ impl RefWindow { // assume that parent was canonicalized self.base = number; } else if (self.base + self.window_size()) != number { - return Err(Error::StateDb(StateDbError::InvalidBlockNumber)); + return Err(Error::StateDb(StateDbError::InvalidBlockNumber)) } trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); let inserted = if matches!(self.queue, DeathRowQueue::Mem { .. }) { diff --git a/client/sysinfo/src/sysinfo.rs b/client/sysinfo/src/sysinfo.rs index 0ea22341c9af3..c66a6f6a62aed 100644 --- a/client/sysinfo/src/sysinfo.rs +++ b/client/sysinfo/src/sysinfo.rs @@ -132,7 +132,7 @@ where S: Serializer, { if let Some(throughput) = maybe_throughput { - return serializer.serialize_some(&(throughput.as_mibs() as u64)); + return serializer.serialize_some(&(throughput.as_mibs() as u64)) } serializer.serialize_none() } @@ -159,7 +159,7 @@ pub(crate) fn benchmark( elapsed = timestamp.elapsed(); if elapsed >= max_duration { - break; + break } } @@ -567,16 +567,16 @@ mod tests { #[test] fn test_benchmark_disk_sequential_writes() { assert!( - benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() - > Throughput::from_mibs(0.0) + benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() > + Throughput::from_mibs(0.0) ); } #[test] fn test_benchmark_disk_random_writes() { assert!( - benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() - > Throughput::from_mibs(0.0) + benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() > + Throughput::from_mibs(0.0) ); } diff --git 
a/client/telemetry/src/endpoints.rs b/client/telemetry/src/endpoints.rs index fa4e8edbaf898..fba3822a90676 100644 --- a/client/telemetry/src/endpoints.rs +++ b/client/telemetry/src/endpoints.rs @@ -65,7 +65,7 @@ fn url_to_multiaddr(url: &str) -> Result { // If not, try the `ws://path/url` format. if let Ok(ma) = libp2p::multiaddr::from_url(url) { - return Ok(ma); + return Ok(ma) } // If we have no clue about the format of that string, assume that we were expecting a diff --git a/client/telemetry/src/lib.rs b/client/telemetry/src/lib.rs index 5d859bc5f4417..503a326f76c2b 100644 --- a/client/telemetry/src/lib.rs +++ b/client/telemetry/src/lib.rs @@ -263,7 +263,7 @@ impl TelemetryWorker { "Could not initialise transport: {}", err, ); - continue; + continue }, }; entry.insert(Node::new(transport, addr.clone(), Vec::new(), Vec::new())) @@ -328,12 +328,12 @@ impl TelemetryWorker { message, )), ); - return; + return }; for (node_max_verbosity, addr) in nodes { if verbosity > *node_max_verbosity { - continue; + continue } if let Some(node) = node_pool.get_mut(addr) { diff --git a/client/telemetry/src/node.rs b/client/telemetry/src/node.rs index 43cccdf916f36..0d71a363a1b26 100644 --- a/client/telemetry/src/node.rs +++ b/client/telemetry/src/node.rs @@ -124,7 +124,7 @@ where ) -> Poll> { while let Some(item) = conn.buf.pop() { if let Err(e) = conn.sink.start_send_unpin(item) { - return Poll::Ready(Err(e)); + return Poll::Ready(Err(e)) } futures::ready!(conn.sink.poll_ready_unpin(cx))?; } @@ -157,11 +157,11 @@ where }, Poll::Ready(Ok(())) => { self.socket = NodeSocket::Connected(conn); - return Poll::Ready(Ok(())); + return Poll::Ready(Ok(())) }, Poll::Pending => { self.socket = NodeSocket::Connected(conn); - return Poll::Pending; + return Poll::Pending }, } }, @@ -171,7 +171,7 @@ where }, Poll::Pending => { self.socket = NodeSocket::Connected(conn); - return Poll::Pending; + return Poll::Pending }, }, NodeSocket::Dialing(mut s) => match Future::poll(Pin::new(&mut s), cx) { 
@@ -187,7 +187,7 @@ where log::debug!(target: "telemetry", "Failed to send a telemetry connection notification: {}", error); } else { self.telemetry_connection_notifier.swap_remove(index); - continue; + continue } } index += 1; @@ -244,12 +244,12 @@ where if Future::poll(Pin::new(&mut s), cx).is_ready() { socket = NodeSocket::ReconnectNow; } else { - break NodeSocket::WaitingReconnect(s); + break NodeSocket::WaitingReconnect(s) } }, NodeSocket::Poisoned => { log::error!(target: "telemetry", "‼️ Poisoned connection with {}", self.addr); - break NodeSocket::Poisoned; + break NodeSocket::Poisoned }, } }; diff --git a/client/telemetry/src/transport.rs b/client/telemetry/src/transport.rs index cdfd79732ee1f..d64da44a83b6b 100644 --- a/client/telemetry/src/transport.rs +++ b/client/telemetry/src/transport.rs @@ -112,7 +112,7 @@ impl StreamSink { log::error!(target: "telemetry", "Detected some internal buffering happening in the telemetry"); let err = io::Error::new(io::ErrorKind::Other, "Internal buffering detected"); - return Poll::Ready(Err(err)); + return Poll::Ready(Err(err)) } } diff --git a/client/tracing/proc-macro/src/lib.rs b/client/tracing/proc-macro/src/lib.rs index 39d0e16e991a2..ba757619fb5a0 100644 --- a/client/tracing/proc-macro/src/lib.rs +++ b/client/tracing/proc-macro/src/lib.rs @@ -113,7 +113,7 @@ pub fn prefix_logs_with(arg: TokenStream, item: TokenStream) -> TokenStream { "missing argument: name of the node. 
Example: sc_cli::prefix_logs_with()", ) .to_compile_error() - .into(); + .into() } let name = syn::parse_macro_input!(arg as Expr); diff --git a/client/tracing/src/block/mod.rs b/client/tracing/src/block/mod.rs index 1833c81b38c6d..63fd1de374cba 100644 --- a/client/tracing/src/block/mod.rs +++ b/client/tracing/src/block/mod.rs @@ -109,11 +109,11 @@ impl BlockSubscriber { impl Subscriber for BlockSubscriber { fn enabled(&self, metadata: &tracing::Metadata<'_>) -> bool { if !metadata.is_span() && metadata.fields().field(REQUIRED_EVENT_FIELD).is_none() { - return false; + return false } for (target, level) in &self.targets { if metadata.level() <= level && metadata.target().starts_with(target) { - return true; + return true } } false @@ -255,7 +255,7 @@ where return Err(Error::Dispatch(format!( "Failed to collect traces and execute block: {}", e - ))); + ))) } } @@ -339,7 +339,7 @@ fn patch_and_filter(mut span: SpanDatum, targets: &str) -> Option { span.target = t; } if !check_target(targets, &span.target, &span.level) { - return None; + return None } } Some(span.into()) @@ -349,7 +349,7 @@ fn patch_and_filter(mut span: SpanDatum, targets: &str) -> Option { fn check_target(targets: &str, target: &str, level: &Level) -> bool { for (t, l) in targets.split(',').map(crate::parse_target) { if target.starts_with(t.as_str()) && level <= &l { - return true; + return true } } false diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index 89aa2219f52aa..1ae695a725f3f 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -141,10 +141,10 @@ impl Values { /// Checks if all individual collections are empty pub fn is_empty(&self) -> bool { - self.bool_values.is_empty() - && self.i64_values.is_empty() - && self.u64_values.is_empty() - && self.string_values.is_empty() + self.bool_values.is_empty() && + self.i64_values.is_empty() && + self.u64_values.is_empty() && + self.string_values.is_empty() } } @@ -175,10 +175,10 @@ impl Serialize for Values { 
where S: Serializer, { - let len = self.bool_values.len() - + self.i64_values.len() - + self.u64_values.len() - + self.string_values.len(); + let len = self.bool_values.len() + + self.i64_values.len() + + self.u64_values.len() + + self.string_values.len(); let mut map = serializer.serialize_map(Some(len))?; for (k, v) in &self.bool_values { map.serialize_entry(k, v)?; @@ -250,7 +250,7 @@ impl ProfilingLayer { fn check_target(&self, target: &str, level: &Level) -> bool { for t in &self.targets { if target.starts_with(t.0.as_str()) && level <= &t.1 { - return true; + return true } } false @@ -627,7 +627,7 @@ mod tests { tracing::event!(target: "test_target", tracing::Level::INFO, "test_event1"); for msg in rx.recv() { if !msg { - break; + break } } // guard2 and span2 dropped / exited diff --git a/client/tracing/src/logging/event_format.rs b/client/tracing/src/logging/event_format.rs index ab90dec6bf6b1..aec6b76843daf 100644 --- a/client/tracing/src/logging/event_format.rs +++ b/client/tracing/src/logging/event_format.rs @@ -95,7 +95,7 @@ where let exts = span.extensions(); if let Some(prefix) = exts.get::() { write!(writer, "{}", prefix.as_str())?; - break; + break } } } @@ -130,10 +130,10 @@ where writer: &mut dyn fmt::Write, event: &Event, ) -> fmt::Result { - if self.dup_to_stdout - && (event.metadata().level() == &Level::INFO - || event.metadata().level() == &Level::WARN - || event.metadata().level() == &Level::ERROR) + if self.dup_to_stdout && + (event.metadata().level() == &Level::INFO || + event.metadata().level() == &Level::WARN || + event.metadata().level() == &Level::ERROR) { let mut out = String::new(); self.format_event_custom(CustomFmtContext::FmtContext(ctx), &mut out, event)?; @@ -276,9 +276,8 @@ where ) -> fmt::Result { match self { CustomFmtContext::FmtContext(fmt_ctx) => fmt_ctx.format_fields(writer, fields), - CustomFmtContext::ContextWithFormatFields(_ctx, fmt_fields) => { - fmt_fields.format_fields(writer, fields) - }, + 
CustomFmtContext::ContextWithFormatFields(_ctx, fmt_fields) => + fmt_fields.format_fields(writer, fields), } } } diff --git a/client/tracing/src/logging/layers/prefix_layer.rs b/client/tracing/src/logging/layers/prefix_layer.rs index 470ffd61f02c8..836ffd2adda8e 100644 --- a/client/tracing/src/logging/layers/prefix_layer.rs +++ b/client/tracing/src/logging/layers/prefix_layer.rs @@ -42,12 +42,12 @@ where "newly created span with ID {:?} did not exist in the registry; this is a bug!", id ); - return; + return }, }; if span.name() != PREFIX_LOG_SPAN { - return; + return } let mut extensions = span.extensions_mut(); diff --git a/client/tracing/src/logging/stderr_writer.rs b/client/tracing/src/logging/stderr_writer.rs index 555d0343cdb74..de78a61af41a2 100644 --- a/client/tracing/src/logging/stderr_writer.rs +++ b/client/tracing/src/logging/stderr_writer.rs @@ -105,9 +105,9 @@ fn log_autoflush_thread() { buffer = BUFFER.lock(); if buffer.len() >= ASYNC_FLUSH_THRESHOLD { // While we were busy flushing we picked up enough logs to do another flush. 
- continue; + continue } else { - break; + break } } } diff --git a/client/transaction-pool/src/enactment_state.rs b/client/transaction-pool/src/enactment_state.rs index 5ce325176211e..6aac98641cf85 100644 --- a/client/transaction-pool/src/enactment_state.rs +++ b/client/transaction-pool/src/enactment_state.rs @@ -87,7 +87,7 @@ where // block was already finalized if self.recent_finalized_block == new_hash { log::debug!(target: "txpool", "handle_enactment: block already finalized"); - return Ok(None); + return Ok(None) } // compute actual tree route from best_block to notified block, and use @@ -109,7 +109,7 @@ where "Recently finalized block {} would be retracted by ChainEvent {}, skipping", self.recent_finalized_block, new_hash ); - return Ok(None); + return Ok(None) } if finalized { @@ -124,7 +124,7 @@ where target: "txpool", "handle_enactment: no newly enacted blocks since recent best block" ); - return Ok(None); + return Ok(None) } // otherwise enacted finalized block becomes best block... 
diff --git a/client/transaction-pool/src/graph/base_pool.rs b/client/transaction-pool/src/graph/base_pool.rs index 6596b1e6da416..8e0422739cc63 100644 --- a/client/transaction-pool/src/graph/base_pool.rs +++ b/client/transaction-pool/src/graph/base_pool.rs @@ -268,7 +268,7 @@ impl BasePool) -> error::Result> { if self.is_imported(&tx.hash) { - return Err(error::Error::AlreadyImported(Box::new(tx.hash))); + return Err(error::Error::AlreadyImported(Box::new(tx.hash))) } let tx = WaitingTransaction::new(tx, self.ready.provided_tags(), &self.recently_pruned); @@ -283,12 +283,12 @@ impl BasePool BasePool { + Err(e) => if first { debug!(target: "txpool", "[{:?}] Error importing: {:?}", current_hash, e); - return Err(e); + return Err(e) } else { failed.push(current_hash); - } - }, + }, } first = false; } @@ -349,7 +348,7 @@ impl BasePool BasePool worst, - Ordering::Equal => { + Ordering::Equal => if worst.insertion_id > transaction.insertion_id { transaction.clone() } else { worst - } - }, + }, Ordering::Greater => transaction.clone(), } }) @@ -422,7 +420,7 @@ impl BasePool BasePool WaitingTransaction { .filter(|tag| { // is true if the tag is already satisfied either via transaction in the pool // or one that was recently included. 
- let is_provided = provided.contains_key(&**tag) - || recently_pruned.iter().any(|x| x.contains(&**tag)); + let is_provided = provided.contains_key(&**tag) || + recently_pruned.iter().any(|x| x.contains(&**tag)); !is_provided }) .cloned() diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 480f006dfe4a9..7b3a8db15982a 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -399,7 +399,7 @@ impl Pool { let ignore_banned = matches!(check, CheckBannedBeforeVerify::No); if let Err(err) = self.validated_pool.check_is_known(&hash, ignore_banned) { - return (hash, ValidatedTransaction::Invalid(hash, err)); + return (hash, ValidatedTransaction::Invalid(hash, err)) } let validation_result = self @@ -414,7 +414,7 @@ impl Pool { }; let validity = match status { - Ok(validity) => { + Ok(validity) => if validity.provides.is_empty() { ValidatedTransaction::Invalid(hash, error::Error::NoTagsProvided.into()) } else { @@ -426,14 +426,11 @@ impl Pool { bytes, validity, ) - } - }, - Err(TransactionValidityError::Invalid(e)) => { - ValidatedTransaction::Invalid(hash, error::Error::InvalidTransaction(e).into()) - }, - Err(TransactionValidityError::Unknown(e)) => { - ValidatedTransaction::Unknown(hash, error::Error::UnknownTransaction(e).into()) - }, + }, + Err(TransactionValidityError::Invalid(e)) => + ValidatedTransaction::Invalid(hash, error::Error::InvalidTransaction(e).into()), + Err(TransactionValidityError::Unknown(e)) => + ValidatedTransaction::Unknown(hash, error::Error::UnknownTransaction(e).into()), }; (hash, validity) diff --git a/client/transaction-pool/src/graph/ready.rs b/client/transaction-pool/src/graph/ready.rs index b27022c46a342..220e69b13e7eb 100644 --- a/client/transaction-pool/src/graph/ready.rs +++ b/client/transaction-pool/src/graph/ready.rs @@ -431,7 +431,7 @@ impl ReadyTransactions { // early exit if we are not replacing anything. 
if replace_hashes.is_empty() { - return Ok((vec![], vec![])); + return Ok((vec![], vec![])) } // now check if collective priority is lower than the replacement transaction. @@ -447,7 +447,7 @@ impl ReadyTransactions { // bail - the transaction has too low priority to replace the old ones if old_priority >= tx.priority { - return Err(error::Error::TooLowPriority { old: old_priority, new: tx.priority }); + return Err(error::Error::TooLowPriority { old: old_priority, new: tx.priority }) } // construct a list of unlocked transactions @@ -548,7 +548,7 @@ impl Iterator for BestIterator { "[{:?}] Skipping invalid child transaction while iterating.", hash ); - continue; + continue } let ready = match self.all.get(hash).cloned() { @@ -574,7 +574,7 @@ impl Iterator for BestIterator { } } - return Some(best.transaction); + return Some(best.transaction) } } } @@ -773,18 +773,18 @@ mod tests { }; // higher priority = better assert!( - TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 1 } - > TransactionRef { transaction: Arc::new(with_priority(2, 3)), insertion_id: 2 } + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 1 } > + TransactionRef { transaction: Arc::new(with_priority(2, 3)), insertion_id: 2 } ); // lower validity = better assert!( - TransactionRef { transaction: Arc::new(with_priority(3, 2)), insertion_id: 1 } - > TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } + TransactionRef { transaction: Arc::new(with_priority(3, 2)), insertion_id: 1 } > + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } ); // lower insertion_id = better assert!( - TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 1 } - > TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 1 } > + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } 
); } diff --git a/client/transaction-pool/src/graph/rotator.rs b/client/transaction-pool/src/graph/rotator.rs index b0ce60b06357e..47e00a1292155 100644 --- a/client/transaction-pool/src/graph/rotator.rs +++ b/client/transaction-pool/src/graph/rotator.rs @@ -88,7 +88,7 @@ impl PoolRotator { xt: &Transaction, ) -> bool { if xt.valid_till > current_block { - return false; + return false } self.ban(now, iter::once(xt.hash.clone())); diff --git a/client/transaction-pool/src/graph/validated_pool.rs b/client/transaction-pool/src/graph/validated_pool.rs index b77d3a9a7f0c3..dcb8195073733 100644 --- a/client/transaction-pool/src/graph/validated_pool.rs +++ b/client/transaction-pool/src/graph/validated_pool.rs @@ -185,9 +185,8 @@ impl ValidatedPool { results .into_iter() .map(|res| match res { - Ok(ref hash) if removed.contains(hash) => { - Err(error::Error::ImmediatelyDropped.into()) - }, + Ok(ref hash) if removed.contains(hash) => + Err(error::Error::ImmediatelyDropped.into()), other => other, }) .collect() @@ -198,7 +197,7 @@ impl ValidatedPool { match tx { ValidatedTransaction::Valid(tx) => { if !tx.propagate && !(self.is_validator.0)() { - return Err(error::Error::Unactionable.into()); + return Err(error::Error::Unactionable.into()) } let imported = self.pool.write().import(tx)?; @@ -207,7 +206,7 @@ impl ValidatedPool { let sinks = &mut self.import_notification_sinks.lock(); sinks.retain_mut(|sink| match sink.try_send(*hash) { Ok(()) => true, - Err(e) => { + Err(e) => if e.is_full() { log::warn!( target: "txpool", @@ -217,8 +216,7 @@ impl ValidatedPool { true } else { false - } - }, + }, }); } @@ -243,8 +241,8 @@ impl ValidatedPool { let future_limit = &self.options.future; log::debug!(target: "txpool", "Pool Status: {:?}", status); - if ready_limit.is_exceeded(status.ready, status.ready_bytes) - || future_limit.is_exceeded(status.future, status.future_bytes) + if ready_limit.is_exceeded(status.ready, status.ready_bytes) || + future_limit.is_exceeded(status.future, 
status.future_bytes) { log::debug!( target: "txpool", @@ -405,8 +403,8 @@ impl ValidatedPool { final_statuses.insert(hash, Status::Failed); }, }, - ValidatedTransaction::Invalid(_, _) - | ValidatedTransaction::Unknown(_, _) => { + ValidatedTransaction::Invalid(_, _) | + ValidatedTransaction::Unknown(_, _) => { final_statuses.insert(hash, Status::Failed); }, } @@ -604,7 +602,7 @@ impl ValidatedPool { pub fn remove_invalid(&self, hashes: &[ExtrinsicHash]) -> Vec> { // early exit in case there is no invalid transactions. if hashes.is_empty() { - return vec![]; + return vec![] } log::debug!(target: "txpool", "Removing invalid transactions: {:?}", hashes); diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index d17ba996f6078..a441bf9b2a9a0 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -209,9 +209,8 @@ where ) -> Self { let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone())); let (revalidation_queue, background_task) = match revalidation_type { - RevalidationType::Light => { - (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None) - }, + RevalidationType::Light => + (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), RevalidationType::Full => { let (queue, background) = revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); @@ -228,9 +227,8 @@ where pool, revalidation_queue: Arc::new(revalidation_queue), revalidation_strategy: Arc::new(Mutex::new(match revalidation_type { - RevalidationType::Light => { - RevalidationStrategy::Light(RevalidationStatus::NotScheduled) - }, + RevalidationType::Light => + RevalidationStrategy::Light(RevalidationStatus::NotScheduled), RevalidationType::Full => RevalidationStrategy::Always, })), ready_poll: Arc::new(Mutex::new(ReadyPoll::new(best_block_number))), @@ -345,13 +343,13 @@ where // There could be transaction being added because of some re-org happening at 
the relevant // block, but this is relative unlikely. if status.ready == 0 && status.future == 0 { - return async { Box::new(std::iter::empty()) as Box<_> }.boxed(); + return async { Box::new(std::iter::empty()) as Box<_> }.boxed() } if self.ready_poll.lock().updated_at() >= at { log::trace!(target: "txpool", "Transaction pool already processed block #{}", at); let iterator: ReadyIteratorFor = Box::new(self.pool.validated_pool().ready()); - return async move { iterator }.boxed(); + return async move { iterator }.boxed() } self.ready_poll @@ -538,8 +536,8 @@ impl RevalidationStatus { }, Self::Scheduled(revalidate_at_time, revalidate_at_block) => { let is_required = - revalidate_at_time.map(|at| Instant::now() >= at).unwrap_or(false) - || revalidate_at_block.map(|at| block >= at).unwrap_or(false); + revalidate_at_time.map(|at| Instant::now() >= at).unwrap_or(false) || + revalidate_at_block.map(|at| block >= at).unwrap_or(false); if is_required { *self = Self::InProgress; } @@ -573,11 +571,11 @@ async fn prune_known_txs_for_block h, Ok(None) => { log::debug!(target: "txpool", "Could not find header for {:?}.", block_hash); - return hashes; + return hashes }, Err(e) => { log::debug!(target: "txpool", "Error retrieving header for {:?}: {}", block_hash, e); - return hashes; + return hashes }, }; @@ -612,7 +610,7 @@ where "Skipping ChainEvent - no last block in tree route {:?}", tree_route, ); - return; + return }, }; @@ -739,11 +737,10 @@ where let compute_tree_route = |from, to| -> Result, String> { match self.api.tree_route(from, to) { Ok(tree_route) => Ok(tree_route), - Err(e) => { + Err(e) => return Err(format!( "Error occurred while computing tree_route from {from:?} to {to:?}: {e}" - )) - }, + )), } }; diff --git a/client/transaction-pool/src/revalidation.rs b/client/transaction-pool/src/revalidation.rs index d82bd015a1f58..b4b4299240a32 100644 --- a/client/transaction-pool/src/revalidation.rs +++ b/client/transaction-pool/src/revalidation.rs @@ -188,7 +188,7 @@ 
impl RevalidationWorker { ext_hash, ); - continue; + continue } self.block_ordered diff --git a/client/utils/src/mpsc.rs b/client/utils/src/mpsc.rs index 620eccbfc7d76..ee3fba4a5ee67 100644 --- a/client/utils/src/mpsc.rs +++ b/client/utils/src/mpsc.rs @@ -119,7 +119,7 @@ mod inner { let mut count = 0; loop { if self.1.is_terminated() { - break; + break } match self.try_next() { diff --git a/client/utils/src/status_sinks.rs b/client/utils/src/status_sinks.rs index 03313e1f20d79..a1d965d08085e 100644 --- a/client/utils/src/status_sinks.rs +++ b/client/utils/src/status_sinks.rs @@ -151,7 +151,7 @@ impl<'a, T> Drop for ReadySinkEvent<'a, T> { fn drop(&mut self) { if let Some(sender) = self.sender.take() { if sender.is_closed() { - return; + return } let _ = self.sinks.entries_tx.unbounded_send(YieldAfter { diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index f80df8026542d..7e03da9ac1c7b 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -1130,7 +1130,7 @@ impl, I: 'static> Pallet { let res = judgement(who); if res.is_err() { if let Some(parent) = T::IdentityVerifier::super_account_id(who) { - return judgement(&parent); + return judgement(&parent) } } res diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs index 2436ed6364e25..f7f11cafecbe2 100644 --- a/frame/assets/src/functions.rs +++ b/frame/assets/src/functions.rs @@ -121,21 +121,21 @@ impl, I: 'static> Pallet { None => return DepositConsequence::UnknownAsset, }; if increase_supply && details.supply.checked_add(&amount).is_none() { - return DepositConsequence::Overflow; + return DepositConsequence::Overflow } if let Some(balance) = Self::maybe_balance(id, who) { if balance.checked_add(&amount).is_none() { - return DepositConsequence::Overflow; + return DepositConsequence::Overflow } } else { if amount < details.min_balance { - return DepositConsequence::BelowMinimum; + return DepositConsequence::BelowMinimum } if !details.is_sufficient && 
!frame_system::Pallet::::can_inc_consumer(who) { - return DepositConsequence::CannotCreate; + return DepositConsequence::CannotCreate } if details.is_sufficient && details.sufficients.checked_add(1).is_none() { - return DepositConsequence::Overflow; + return DepositConsequence::Overflow } } @@ -155,20 +155,20 @@ impl, I: 'static> Pallet { None => return UnknownAsset, }; if details.supply.checked_sub(&amount).is_none() { - return Underflow; + return Underflow } if details.status == AssetStatus::Frozen { - return Frozen; + return Frozen } if amount.is_zero() { - return Success; + return Success } let account = match Account::::get(id, who) { Some(a) => a, None => return NoFunds, }; if account.is_frozen { - return Frozen; + return Frozen } if let Some(rest) = account.balance.checked_sub(&amount) { if let Some(frozen) = T::Freezer::frozen_balance(id, who) { @@ -258,7 +258,7 @@ impl, I: 'static> Pallet { Ok(dust) => actual.saturating_add(dust), //< guaranteed by reducible_balance Err(e) => { debug_assert!(false, "passed from reducible_balance; qed"); - return Err(e); + return Err(e) }, }; @@ -384,7 +384,7 @@ impl, I: 'static> Pallet { ) -> DispatchResult, ) -> DispatchResult { if amount.is_zero() { - return Ok(()); + return Ok(()) } Self::can_increase(id, beneficiary, amount, true).into_result()?; @@ -470,7 +470,7 @@ impl, I: 'static> Pallet { ) -> DispatchResult, ) -> Result { if amount.is_zero() { - return Ok(amount); + return Ok(amount) } let details = Asset::::get(id).ok_or(Error::::Unknown)?; @@ -493,7 +493,7 @@ impl, I: 'static> Pallet { debug_assert!(account.balance.is_zero(), "checked in prep; qed"); target_died = Some(Self::dead_account(target, details, &account.reason, false)); if let Some(Remove) = target_died { - return Ok(()); + return Ok(()) } }; *maybe_account = Some(account); @@ -546,7 +546,7 @@ impl, I: 'static> Pallet { ) -> Result<(T::Balance, Option), DispatchError> { // Early exit if no-op. 
if amount.is_zero() { - return Ok((amount, None)); + return Ok((amount, None)) } let details = Asset::::get(id).ok_or(Error::::Unknown)?; ensure!(details.status == AssetStatus::Live, Error::::AssetNotLive); @@ -569,7 +569,7 @@ impl, I: 'static> Pallet { // Skip if source == dest if source == dest { - return Ok(()); + return Ok(()) } // Burn any dust if needed. @@ -614,7 +614,7 @@ impl, I: 'static> Pallet { Some(Self::dead_account(source, details, &source_account.reason, false)); if let Some(Remove) = source_died { Account::::remove(id, &source); - return Ok(()); + return Ok(()) } } Account::::insert(id, &source, &source_account); @@ -706,7 +706,7 @@ impl, I: 'static> Pallet { let _ = Self::dead_account(&who, &mut details, &v.reason, true); dead_accounts.push(who); if dead_accounts.len() >= (max_items as usize) { - break; + break } } remaining_accounts = details.accounts; @@ -746,7 +746,7 @@ impl, I: 'static> Pallet { removed_approvals = removed_approvals.saturating_add(1); details.approvals = details.approvals.saturating_sub(1); if removed_approvals >= max_items { - break; + break } } Self::deposit_event(Event::ApprovalsDestroyed { diff --git a/frame/assets/src/impl_stored_map.rs b/frame/assets/src/impl_stored_map.rs index 9e9af9d48c1ee..a4669c776ed41 100644 --- a/frame/assets/src/impl_stored_map.rs +++ b/frame/assets/src/impl_stored_map.rs @@ -42,7 +42,7 @@ impl, I: 'static> StoredMap<(T::AssetId, T::AccountId), T::Extra> f if let Some(ref mut account) = maybe_account { account.extra = extra; } else { - return Err(DispatchError::NoProviders.into()); + return Err(DispatchError::NoProviders.into()) } } else { // They want to delete it. Let this pass if the item never existed anyway. 
diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index e51c7cc0427f8..cdd0553218225 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -1001,7 +1001,7 @@ pub mod pallet { ensure!(details.status == AssetStatus::Live, Error::::LiveAsset); ensure!(origin == details.owner, Error::::NoPermission); if details.owner == owner { - return Ok(()); + return Ok(()) } let metadata_deposit = Metadata::::get(id).deposit; diff --git a/frame/assets/src/types.rs b/frame/assets/src/types.rs index 88c3b2cd8193a..557af6bd3f488 100644 --- a/frame/assets/src/types.rs +++ b/frame/assets/src/types.rs @@ -102,7 +102,7 @@ pub enum ExistenceReason { impl ExistenceReason { pub(crate) fn take_deposit(&mut self) -> Option { if !matches!(self, ExistenceReason::DepositHeld(_)) { - return None; + return None } if let ExistenceReason::DepositHeld(deposit) = sp_std::mem::replace(self, ExistenceReason::DepositRefunded) diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index e2a70f53bb532..ff2c5df04a453 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -181,7 +181,7 @@ impl Pallet { let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); for (id, mut data) in pre_runtime_digests { if id == AURA_ENGINE_ID { - return Slot::decode(&mut data).ok(); + return Slot::decode(&mut data).ok() } } @@ -252,7 +252,7 @@ impl FindAuthor for Pallet { if id == AURA_ENGINE_ID { let slot = Slot::decode(&mut data).ok()?; let author_index = *slot % Self::authorities().len() as u64; - return Some(author_index as u32); + return Some(author_index as u32) } } diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 5d081aa932276..c08e773abe3a7 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -123,7 +123,7 @@ where if let Some(ref author) = author { if !acc.insert((*number, author.clone())) { - return Err("more than one uncle per number per author included"); + return Err("more than one uncle per 
number per author included") } } @@ -243,7 +243,7 @@ pub mod pallet { ensure!(new_uncles.len() <= MAX_UNCLES, Error::::TooManyUncles); if >::get() { - return Err(Error::::UnclesAlreadySet.into()); + return Err(Error::::UnclesAlreadySet.into()) } >::put(true); @@ -282,7 +282,7 @@ pub mod pallet { existing_hashes.push(hash); if new_uncles.len() == MAX_UNCLES { - break; + break } }, Err(_) => { @@ -304,9 +304,8 @@ pub mod pallet { _data: &InherentData, ) -> result::Result<(), Self::Error> { match call { - Call::set_uncles { ref new_uncles } if new_uncles.len() > MAX_UNCLES => { - Err(InherentError::Uncles(Error::::TooManyUncles.as_str().into())) - }, + Call::set_uncles { ref new_uncles } if new_uncles.len() > MAX_UNCLES => + Err(InherentError::Uncles(Error::::TooManyUncles.as_str().into())), _ => Ok(()), } } @@ -325,7 +324,7 @@ impl Pallet { pub fn author() -> Option { // Check the memoized storage value. if let Some(author) = >::get() { - return Some(author); + return Some(author) } let digest = >::digest(); @@ -384,30 +383,30 @@ impl Pallet { let hash = uncle.hash(); if uncle.number() < &One::one() { - return Err(Error::::GenesisUncle.into()); + return Err(Error::::GenesisUncle.into()) } if uncle.number() > &maximum_height { - return Err(Error::::TooHighUncle.into()); + return Err(Error::::TooHighUncle.into()) } { let parent_number = *uncle.number() - One::one(); let parent_hash = >::block_hash(&parent_number); if &parent_hash != uncle.parent_hash() { - return Err(Error::::InvalidUncleParent.into()); + return Err(Error::::InvalidUncleParent.into()) } } if uncle.number() < &minimum_height { - return Err(Error::::OldUncle.into()); + return Err(Error::::OldUncle.into()) } let duplicate = existing_uncles.into_iter().any(|h| *h == hash); let in_chain = >::block_hash(uncle.number()) == hash; if duplicate || in_chain { - return Err(Error::::UncleAlreadyIncluded.into()); + return Err(Error::::UncleAlreadyIncluded.into()) } // check uncle validity. 
@@ -509,7 +508,7 @@ mod tests { { for (id, mut data) in digests { if id == TEST_ID { - return u64::decode(&mut data).ok(); + return u64::decode(&mut data).ok() } } @@ -533,9 +532,9 @@ mod tests { Err(_) => return Err("wrong seal"), Ok(a) => { if a != author { - return Err("wrong author in seal"); + return Err("wrong author in seal") } - break; + break }, } } diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index 2cc7a6c940704..f55bda751887d 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -196,7 +196,7 @@ impl Pallet { "rejecting unsigned report equivocation transaction because it is not local/in-block.", ); - return InvalidTransaction::Call.into(); + return InvalidTransaction::Call.into() }, } diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 85f1799324841..eadaa036332fa 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -488,7 +488,7 @@ impl FindAuthor for Pallet { for (id, mut data) in digests.into_iter() { if id == BABE_ENGINE_ID { let pre_digest: PreDigest = PreDigest::decode(&mut data).ok()?; - return Some(pre_digest.authority_index()); + return Some(pre_digest.authority_index()) } } @@ -733,7 +733,7 @@ impl Pallet { // let's ensure that we only do the initialization once per block let initialized = Self::initialized().is_some(); if initialized { - return; + return } let pre_digest = @@ -804,7 +804,7 @@ impl Pallet { // validate the equivocation proof if !sp_consensus_babe::check_equivocation_proof(equivocation_proof) { - return Err(Error::::InvalidEquivocationProof.into()); + return Err(Error::::InvalidEquivocationProof.into()) } let validator_set_count = key_owner_proof.validator_count(); @@ -816,7 +816,7 @@ impl Pallet { // check that the slot number is consistent with the session index // in the key ownership proof (i.e. 
slot is for that epoch) if epoch_index != session_index { - return Err(Error::::InvalidKeyOwnershipProof.into()); + return Err(Error::::InvalidKeyOwnershipProof.into()) } // check the membership proof and extract the offender's id diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index c93b133897865..d4132e6378540 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -242,8 +242,8 @@ fn can_estimate_current_epoch_progress() { ); } else { assert!( - Babe::estimate_current_session_progress(i).0.unwrap() - < Permill::from_percent(100) + Babe::estimate_current_session_progress(i).0.unwrap() < + Permill::from_percent(100) ); } } @@ -485,7 +485,7 @@ fn report_equivocation_current_session_works() { // check that the balances of all other validators are left intact. for validator in &validators { if *validator == offending_validator_id { - continue; + continue } assert_eq!(Balances::total_balance(validator), 10_000_000); diff --git a/frame/bags-list/remote-tests/src/lib.rs b/frame/bags-list/remote-tests/src/lib.rs index cb4db5257f1a9..fc25e3b65ddb1 100644 --- a/frame/bags-list/remote-tests/src/lib.rs +++ b/frame/bags-list/remote-tests/src/lib.rs @@ -80,7 +80,7 @@ pub fn display_and_check_bags>( Some(bag) => bag, None => { log::info!(target: LOG_TARGET, "{} NO VOTERS.", pretty_thresh); - continue; + continue }, }; diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs index 744cfe79bb21a..272526ad1a636 100644 --- a/frame/bags-list/src/list/mod.rs +++ b/frame/bags-list/src/list/mod.rs @@ -143,7 +143,7 @@ impl, I: 'static> List { pub fn migrate(old_thresholds: &[T::Score]) -> u32 { let new_thresholds = T::BagThresholds::get(); if new_thresholds == old_thresholds { - return 0; + return 0 } // we can't check all preconditions, but we can check one @@ -178,7 +178,7 @@ impl, I: 'static> List { if !affected_old_bags.insert(affected_bag) { // If the previous threshold list was [10, 20], and we insert [3, 5], then there's // no 
point iterating through bag 10 twice. - continue; + continue } if let Some(bag) = Bag::::get(affected_bag) { @@ -190,7 +190,7 @@ impl, I: 'static> List { // a removed bag means that all members of that bag must be rebagged for removed_bag in removed_bags.clone() { if !affected_old_bags.insert(removed_bag) { - continue; + continue } if let Some(bag) = Bag::::get(removed_bag) { @@ -249,14 +249,15 @@ impl, I: 'static> List { // easier; they can just configure `type BagThresholds = ()`. let thresholds = T::BagThresholds::get(); let iter = thresholds.iter().copied(); - let iter: Box> = - if thresholds.last() == Some(&T::Score::max_value()) { - // in the event that they included it, we can just pass the iterator through unchanged. - Box::new(iter.rev()) - } else { - // otherwise, insert it here. - Box::new(iter.chain(iter::once(T::Score::max_value())).rev()) - }; + let iter: Box> = if thresholds.last() == + Some(&T::Score::max_value()) + { + // in the event that they included it, we can just pass the iterator through unchanged. + Box::new(iter.rev()) + } else { + // otherwise, insert it here. + Box::new(iter.chain(iter::once(T::Score::max_value())).rev()) + }; iter.filter_map(Bag::get).flat_map(|bag| bag.iter()) } @@ -312,7 +313,7 @@ impl, I: 'static> List { /// Returns an error if the list already contains `id`. pub(crate) fn insert(id: T::AccountId, score: T::Score) -> Result<(), ListError> { if Self::contains(&id) { - return Err(ListError::Duplicate); + return Err(ListError::Duplicate) } let bag_score = notional_bag_for::(score); @@ -338,7 +339,7 @@ impl, I: 'static> List { /// Remove an id from the list, returning an error if `id` does not exists. 
pub(crate) fn remove(id: &T::AccountId) -> Result<(), ListError> { if !Self::contains(id) { - return Err(ListError::NodeNotFound); + return Err(ListError::NodeNotFound) } let _ = Self::remove_many(sp_std::iter::once(id)); Ok(()) @@ -566,14 +567,15 @@ impl, I: 'static> List { let thresholds = T::BagThresholds::get(); let iter = thresholds.iter().copied(); - let iter: Box> = - if thresholds.last() == Some(&T::Score::max_value()) { - // in the event that they included it, we can just pass the iterator through unchanged. - Box::new(iter) - } else { - // otherwise, insert it here. - Box::new(iter.chain(sp_std::iter::once(T::Score::max_value()))) - }; + let iter: Box> = if thresholds.last() == + Some(&T::Score::max_value()) + { + // in the event that they included it, we can just pass the iterator through unchanged. + Box::new(iter) + } else { + // otherwise, insert it here. + Box::new(iter.chain(sp_std::iter::once(T::Score::max_value()))) + }; iter.filter_map(|t| { Bag::::get(t) @@ -691,7 +693,7 @@ impl, I: 'static> Bag { // this should never happen, but this check prevents one path to a worst case // infinite loop. 
defensive!("system logic error: inserting a node who has the id of tail"); - return; + return }; } @@ -902,9 +904,9 @@ impl, I: 'static> Node { "node does not exist in the expected bag" ); - let non_terminal_check = !self.is_terminal() - && expected_bag.head.as_ref() != Some(id) - && expected_bag.tail.as_ref() != Some(id); + let non_terminal_check = !self.is_terminal() && + expected_bag.head.as_ref() != Some(id) && + expected_bag.tail.as_ref() != Some(id); let terminal_check = expected_bag.head.as_ref() == Some(id) || expected_bag.tail.as_ref() == Some(id); frame_support::ensure!( diff --git a/frame/bags-list/src/migrations.rs b/frame/bags-list/src/migrations.rs index c12fa7e723a7f..e1dc9f777e537 100644 --- a/frame/bags-list/src/migrations.rs +++ b/frame/bags-list/src/migrations.rs @@ -115,7 +115,7 @@ impl, I: 'static> OnRuntimeUpgrade for AddScore { crate::ListNodes::::insert(node.id, new_node); } - return frame_support::weights::Weight::MAX; + return frame_support::weights::Weight::MAX } #[cfg(feature = "try-runtime")] diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 530286552f1ad..d3085152eba6c 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -647,7 +647,7 @@ impl BitOr for Reasons { type Output = Reasons; fn bitor(self, other: Reasons) -> Reasons { if self == other { - return self; + return self } Reasons::All } @@ -806,11 +806,11 @@ impl, I: 'static> Pallet { mint: bool, ) -> DepositConsequence { if amount.is_zero() { - return DepositConsequence::Success; + return DepositConsequence::Success } if mint && TotalIssuance::::get().checked_add(&amount).is_none() { - return DepositConsequence::Overflow; + return DepositConsequence::Overflow } let new_total_balance = match account.total().checked_add(&amount) { @@ -819,7 +819,7 @@ impl, I: 'static> Pallet { }; if new_total_balance < T::ExistentialDeposit::get() { - return DepositConsequence::BelowMinimum; + return DepositConsequence::BelowMinimum } // NOTE: We assume 
that we are a provider, so don't need to do any checks in the @@ -834,11 +834,11 @@ impl, I: 'static> Pallet { account: &AccountData, ) -> WithdrawConsequence { if amount.is_zero() { - return WithdrawConsequence::Success; + return WithdrawConsequence::Success } if TotalIssuance::::get().checked_sub(&amount).is_none() { - return WithdrawConsequence::Underflow; + return WithdrawConsequence::Underflow } let new_total_balance = match account.total().checked_sub(&amount) { @@ -855,7 +855,7 @@ impl, I: 'static> Pallet { if frame_system::Pallet::::can_dec_provider(who) { WithdrawConsequence::ReducedToZero(new_total_balance) } else { - return WithdrawConsequence::WouldDie; + return WithdrawConsequence::WouldDie } } else { WithdrawConsequence::Success @@ -870,7 +870,7 @@ impl, I: 'static> Pallet { // Eventual free funds must be no less than the frozen balance. let min_balance = account.frozen(Reasons::All); if new_free_balance < min_balance { - return WithdrawConsequence::Frozen; + return WithdrawConsequence::Frozen } success @@ -1013,14 +1013,14 @@ impl, I: 'static> Pallet { status: Status, ) -> Result { if value.is_zero() { - return Ok(Zero::zero()); + return Ok(Zero::zero()) } if slashed == beneficiary { return match status { Status::Free => Ok(value.saturating_sub(Self::unreserve(slashed, value))), Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), - }; + } } let ((actual, _maybe_one_dust), _maybe_other_dust) = Self::try_mutate_account_with_dust( @@ -1033,18 +1033,16 @@ impl, I: 'static> Pallet { let actual = cmp::min(from_account.reserved, value); ensure!(best_effort || actual == value, Error::::InsufficientBalance); match status { - Status::Free => { + Status::Free => to_account.free = to_account .free .checked_add(&actual) - .ok_or(ArithmeticError::Overflow)? 
- }, - Status::Reserved => { + .ok_or(ArithmeticError::Overflow)?, + Status::Reserved => to_account.reserved = to_account .reserved .checked_add(&actual) - .ok_or(ArithmeticError::Overflow)? - }, + .ok_or(ArithmeticError::Overflow)?, } from_account.reserved -= actual; Ok(actual) @@ -1103,7 +1101,7 @@ impl, I: 'static> fungible::Inspect for Pallet impl, I: 'static> fungible::Mutate for Pallet { fn mint_into(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { if amount.is_zero() { - return Ok(()); + return Ok(()) } Self::try_mutate_account(who, |account, _is_new| -> DispatchResult { Self::deposit_consequence(who, amount, account, true).into_result()?; @@ -1120,7 +1118,7 @@ impl, I: 'static> fungible::Mutate for Pallet { amount: Self::Balance, ) -> Result { if amount.is_zero() { - return Ok(Self::Balance::zero()); + return Ok(Self::Balance::zero()) } let actual = Self::try_mutate_account( who, @@ -1179,7 +1177,7 @@ impl, I: 'static> fungible::InspectHold for Pallet, I: 'static> fungible::InspectHold for Pallet, I: 'static> fungible::MutateHold for Pallet { fn hold(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { if amount.is_zero() { - return Ok(()); + return Ok(()) } ensure!(Self::can_reserve(who, amount), Error::::InsufficientBalance); Self::mutate_account(who, |a| { @@ -1208,7 +1206,7 @@ impl, I: 'static> fungible::MutateHold for Pallet Result { if amount.is_zero() { - return Ok(amount); + return Ok(amount) } // Done on a best-effort basis. Self::try_mutate_account(who, |a, _| { @@ -1414,7 +1412,7 @@ where // Check if `value` amount of free balance can be slashed from `who`. fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { if value.is_zero() { - return true; + return true } Self::free_balance(who) >= value } @@ -1431,7 +1429,7 @@ where // Is a no-op if amount to be burned is zero. 
fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance { if amount.is_zero() { - return PositiveImbalance::zero(); + return PositiveImbalance::zero() } >::mutate(|issued| { *issued = issued.checked_sub(&amount).unwrap_or_else(|| { @@ -1447,7 +1445,7 @@ where // Is a no-op if amount to be issued it zero. fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance { if amount.is_zero() { - return NegativeImbalance::zero(); + return NegativeImbalance::zero() } >::mutate(|issued| { *issued = issued.checked_add(&amount).unwrap_or_else(|| { @@ -1477,7 +1475,7 @@ where new_balance: T::Balance, ) -> DispatchResult { if amount.is_zero() { - return Ok(()); + return Ok(()) } let min_balance = Self::account(who).frozen(reasons.into()); ensure!(new_balance >= min_balance, Error::::LiquidityRestrictions); @@ -1493,7 +1491,7 @@ where existence_requirement: ExistenceRequirement, ) -> DispatchResult { if value.is_zero() || transactor == dest { - return Ok(()); + return Ok(()) } Self::try_mutate_account_with_dust( @@ -1561,10 +1559,10 @@ where /// inconsistent or `can_slash` wasn't used appropriately. fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) { if value.is_zero() { - return (NegativeImbalance::zero(), Zero::zero()); + return (NegativeImbalance::zero(), Zero::zero()) } if Self::total_balance(who).is_zero() { - return (NegativeImbalance::zero(), value); + return (NegativeImbalance::zero(), value) } for attempt in 0..2 { @@ -1613,7 +1611,7 @@ where who: who.clone(), amount: value.saturating_sub(not_slashed), }); - return (imbalance, not_slashed); + return (imbalance, not_slashed) }, Err(_) => (), } @@ -1631,7 +1629,7 @@ where value: Self::Balance, ) -> Result { if value.is_zero() { - return Ok(PositiveImbalance::zero()); + return Ok(PositiveImbalance::zero()) } Self::try_mutate_account( @@ -1656,7 +1654,7 @@ where /// - `value` is so large it would cause the balance of `who` to overflow. 
fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance { if value.is_zero() { - return Self::PositiveImbalance::zero(); + return Self::PositiveImbalance::zero() } Self::try_mutate_account( @@ -1689,7 +1687,7 @@ where liveness: ExistenceRequirement, ) -> result::Result { if value.is_zero() { - return Ok(NegativeImbalance::zero()); + return Ok(NegativeImbalance::zero()) } Self::try_mutate_account( @@ -1762,7 +1760,7 @@ where /// Always `true` if value to be reserved is zero. fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { if value.is_zero() { - return true; + return true } Self::account(who).free.checked_sub(&value).map_or(false, |new_balance| { Self::ensure_can_withdraw(who, value, WithdrawReasons::RESERVE, new_balance).is_ok() @@ -1778,7 +1776,7 @@ where /// Is a no-op if value to be reserved is zero. fn reserve(who: &T::AccountId, value: Self::Balance) -> DispatchResult { if value.is_zero() { - return Ok(()); + return Ok(()) } Self::try_mutate_account(who, |account, _| -> DispatchResult { @@ -1800,10 +1798,10 @@ where /// NOTE: returns amount value which wasn't successfully unreserved. fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { if value.is_zero() { - return Zero::zero(); + return Zero::zero() } if Self::total_balance(who).is_zero() { - return value; + return value } let actual = match Self::mutate_account(who, |account| { @@ -1819,7 +1817,7 @@ where // This should never happen since we don't alter the total amount in the account. // If it ever does, then we should fail gracefully though, indicating that nothing // could be done. 
- return value; + return value }, }; @@ -1836,10 +1834,10 @@ where value: Self::Balance, ) -> (Self::NegativeImbalance, Self::Balance) { if value.is_zero() { - return (NegativeImbalance::zero(), Zero::zero()); + return (NegativeImbalance::zero(), Zero::zero()) } if Self::total_balance(who).is_zero() { - return (NegativeImbalance::zero(), value); + return (NegativeImbalance::zero(), value) } // NOTE: `mutate_account` may fail if it attempts to reduce the balance to the point that an @@ -1868,7 +1866,7 @@ where who: who.clone(), amount: value.saturating_sub(not_slashed), }); - return (imbalance, not_slashed); + return (imbalance, not_slashed) }, Err(_) => (), } @@ -1917,7 +1915,7 @@ where value: Self::Balance, ) -> DispatchResult { if value.is_zero() { - return Ok(()); + return Ok(()) } Reserves::::try_mutate(who, |reserves| -> DispatchResult { @@ -1946,7 +1944,7 @@ where value: Self::Balance, ) -> Self::Balance { if value.is_zero() { - return Zero::zero(); + return Zero::zero() } Reserves::::mutate_exists(who, |maybe_reserves| -> Self::Balance { @@ -1993,7 +1991,7 @@ where value: Self::Balance, ) -> (Self::NegativeImbalance, Self::Balance) { if value.is_zero() { - return (NegativeImbalance::zero(), Zero::zero()); + return (NegativeImbalance::zero(), Zero::zero()) } Reserves::::mutate(who, |reserves| -> (Self::NegativeImbalance, Self::Balance) { @@ -2032,16 +2030,15 @@ where status: Status, ) -> Result { if value.is_zero() { - return Ok(Zero::zero()); + return Ok(Zero::zero()) } if slashed == beneficiary { return match status { Status::Free => Ok(Self::unreserve_named(id, slashed, value)), - Status::Reserved => { - Ok(value.saturating_sub(Self::reserved_balance_named(id, slashed))) - }, - }; + Status::Reserved => + Ok(value.saturating_sub(Self::reserved_balance_named(id, slashed))), + } } Reserves::::try_mutate(slashed, |reserves| -> Result { @@ -2139,7 +2136,7 @@ where reasons: WithdrawReasons, ) { if amount.is_zero() || reasons.is_empty() { - return; + return } let 
mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); let mut locks = Self::locks(who) @@ -2161,7 +2158,7 @@ where reasons: WithdrawReasons, ) { if amount.is_zero() || reasons.is_empty() { - return; + return } let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); let mut locks = Self::locks(who) diff --git a/frame/beefy-mmr/primitives/src/lib.rs b/frame/beefy-mmr/primitives/src/lib.rs index 6ecb5c322e7e4..f88fb89acaaab 100644 --- a/frame/beefy-mmr/primitives/src/lib.rs +++ b/frame/beefy-mmr/primitives/src/lib.rs @@ -246,7 +246,7 @@ where L: Into>, { if leaf_index >= number_of_leaves { - return false; + return false } let leaf_hash = match leaf.into() { @@ -339,7 +339,7 @@ where "[merkelize_row] Next: {:?}", next.iter().map(|s| array_bytes::bytes2hex("", s.as_ref())).collect::>() ); - return Err(next); + return Err(next) }, } } diff --git a/frame/beefy/src/lib.rs b/frame/beefy/src/lib.rs index 8b82388578cd3..4cb23107e7843 100644 --- a/frame/beefy/src/lib.rs +++ b/frame/beefy/src/lib.rs @@ -152,11 +152,11 @@ impl Pallet { fn initialize_authorities(authorities: &Vec) -> Result<(), ()> { if authorities.is_empty() { - return Ok(()); + return Ok(()) } if !>::get().is_empty() { - return Err(()); + return Err(()) } let bounded_authorities = diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index 74fb0b942b482..0b77a92347d03 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -172,7 +172,7 @@ fn linear_regression( let (intercept, params, errors) = raw_linear_regression(&xs, &ys, x_vars, true)?; if intercept >= -0.0001 { // The intercept is positive, or is effectively zero. - return Some((intercept, params, errors[1..].to_vec())); + return Some((intercept, params, errors[1..].to_vec())) } // The intercept is negative. @@ -199,7 +199,7 @@ impl Analysis { // results. Note: We choose the median value because it is more robust to outliers. 
fn median_value(r: &Vec, selector: BenchmarkSelector) -> Option { if r.is_empty() { - return None; + return None } let mut values: Vec = r @@ -229,7 +229,7 @@ impl Analysis { pub fn median_slopes(r: &Vec, selector: BenchmarkSelector) -> Option { if r[0].components.is_empty() { - return Self::median_value(r, selector); + return Self::median_value(r, selector) } let results = r[0] @@ -329,7 +329,7 @@ impl Analysis { pub fn min_squares_iqr(r: &Vec, selector: BenchmarkSelector) -> Option { if r[0].components.is_empty() || r.len() <= 2 { - return Self::median_value(r, selector); + return Self::median_value(r, selector) } let mut results = BTreeMap::, Vec>::new(); @@ -356,7 +356,7 @@ impl Analysis { .map(|(p, vs)| { // Avoid divide by zero if vs.is_empty() { - return (p.clone(), 0, 0); + return (p.clone(), 0, 0) } let total = vs.iter().fold(0u128, |acc, v| acc + *v); let mean = total / vs.len() as u128; @@ -405,7 +405,7 @@ impl Analysis { let min_squares = Self::min_squares_iqr(r, selector); if median_slopes.is_none() || min_squares.is_none() { - return None; + return None } let median_slopes = median_slopes.unwrap(); @@ -438,7 +438,7 @@ fn ms(mut nanos: u128) -> String { while x > 1 { if nanos > x * 1_000 { nanos = nanos / x * x; - break; + break } x /= 10; } diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 04b3204b892ed..a221eccb82c85 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -910,7 +910,7 @@ macro_rules! impl_bench_name_tests { // Every variant must implement [`BenchmarkingSetup`]. // // ```nocompile -// +// // struct Transfer; // impl BenchmarkingSetup for Transfer { ... 
} // diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 7017e80d6075a..88a7d6d0286b2 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -66,7 +66,7 @@ mod pallet_test { #[pallet::weight(0)] pub fn always_error(_origin: OriginFor) -> DispatchResult { - return Err("I always fail".into()); + return Err("I always fail".into()) } } } diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 5b5b3aa71bffa..07dd781c29af3 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -52,8 +52,8 @@ fn setup_bounty, I: 'static>( let caller = account("caller", u, SEED); let value: BalanceOf = T::BountyValueMinimum::get().saturating_mul(100u32.into()); let fee = value / 2u32.into(); - let deposit = T::BountyDepositBase::get() - + T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into(); + let deposit = T::BountyDepositBase::get() + + T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into(); let _ = T::Currency::make_free_balance_be(&caller, deposit); let curator = account("curator", u, SEED); let _ = T::Currency::make_free_balance_be(&curator, fee / 2u32.into()); diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index ae81befd7246d..d947226f87fa0 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -447,7 +447,7 @@ pub mod pallet { match bounty.status { BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => { // No curator to unassign at this point. - return Err(Error::::UnexpectedStatus.into()); + return Err(Error::::UnexpectedStatus.into()) }, BountyStatus::CuratorProposed { ref curator } => { // A curator has been proposed, but not accepted yet. @@ -472,7 +472,7 @@ pub mod pallet { // Continue to change bounty status below... } else { // Curator has more time to give an update. 
- return Err(Error::::Premature.into()); + return Err(Error::::Premature.into()) } } else { // Else this is the curator, willingly giving up their role. @@ -528,8 +528,8 @@ pub mod pallet { T::Currency::reserve(curator, deposit)?; bounty.curator_deposit = deposit; - let update_due = frame_system::Pallet::::block_number() - + T::BountyUpdatePeriod::get(); + let update_due = frame_system::Pallet::::block_number() + + T::BountyUpdatePeriod::get(); bounty.status = BountyStatus::Active { curator: curator.clone(), update_due }; @@ -579,8 +579,8 @@ pub mod pallet { bounty.status = BountyStatus::PendingPayout { curator: signer, beneficiary: beneficiary.clone(), - unlock_at: frame_system::Pallet::::block_number() - + T::BountyDepositPayoutDelay::get(), + unlock_at: frame_system::Pallet::::block_number() + + T::BountyDepositPayoutDelay::get(), }; Ok(()) @@ -697,12 +697,12 @@ pub mod pallet { // Return early, nothing else to do. return Ok( Some(>::WeightInfo::close_bounty_proposed()).into() - ); + ) }, BountyStatus::Approved => { // For weight reasons, we don't allow a council to cancel in this phase. // We ask for them to wait until it is funded before they can cancel. - return Err(Error::::UnexpectedStatus.into()); + return Err(Error::::UnexpectedStatus.into()) }, BountyStatus::Funded | BountyStatus::CuratorProposed { .. } => { // Nothing extra to do besides the removal of the bounty below. @@ -719,7 +719,7 @@ pub mod pallet { // this bounty, it should mean the curator was acting maliciously. // So the council should first unassign the curator, slashing their // deposit. 
- return Err(Error::::PendingPayout.into()); + return Err(Error::::PendingPayout.into()) }, } @@ -767,8 +767,8 @@ pub mod pallet { match bounty.status { BountyStatus::Active { ref curator, ref mut update_due } => { ensure!(*curator == signer, Error::::RequireCurator); - *update_due = (frame_system::Pallet::::block_number() - + T::BountyUpdatePeriod::get()) + *update_due = (frame_system::Pallet::::block_number() + + T::BountyUpdatePeriod::get()) .max(*update_due); }, _ => return Err(Error::::UnexpectedStatus.into()), @@ -825,8 +825,8 @@ impl, I: 'static> Pallet { let index = Self::bounty_count(); // reserve deposit for new bounty - let bond = T::BountyDepositBase::get() - + T::DataDepositPerByte::get() * (bounded_description.len() as u32).into(); + let bond = T::BountyDepositBase::get() + + T::DataDepositPerByte::get() * (bounded_description.len() as u32).into(); T::Currency::reserve(&proposer, bond) .map_err(|_| Error::::InsufficientProposersBalance)?; diff --git a/frame/bounties/src/migrations/v4.rs b/frame/bounties/src/migrations/v4.rs index af7b24f30d476..2f81c97127bcd 100644 --- a/frame/bounties/src/migrations/v4.rs +++ b/frame/bounties/src/migrations/v4.rs @@ -54,7 +54,7 @@ pub fn migrate< target: "runtime::bounties", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return Weight::zero(); + return Weight::zero() } let on_chain_storage_version =

::on_chain_storage_version(); diff --git a/frame/child-bounties/src/benchmarking.rs b/frame/child-bounties/src/benchmarking.rs index ebd7e94a4a1b7..697ed40e0071f 100644 --- a/frame/child-bounties/src/benchmarking.rs +++ b/frame/child-bounties/src/benchmarking.rs @@ -61,8 +61,8 @@ fn setup_bounty( let caller = account("caller", user, SEED); let value: BalanceOf = T::BountyValueMinimum::get().saturating_mul(100u32.into()); let fee = value / 2u32.into(); - let deposit = T::BountyDepositBase::get() - + T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into(); + let deposit = T::BountyDepositBase::get() + + T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into(); let _ = T::Currency::make_free_balance_be(&caller, deposit); let curator = account("curator", user, SEED); let _ = T::Currency::make_free_balance_be(&curator, fee / 2u32.into()); diff --git a/frame/child-bounties/src/lib.rs b/frame/child-bounties/src/lib.rs index 8b87a8509ffc7..2dfe0660ad68e 100644 --- a/frame/child-bounties/src/lib.rs +++ b/frame/child-bounties/src/lib.rs @@ -251,8 +251,8 @@ pub mod pallet { description.try_into().map_err(|_| BountiesError::::ReasonTooBig)?; ensure!(value >= T::ChildBountyValueMinimum::get(), BountiesError::::InvalidValue); ensure!( - Self::parent_child_bounties(parent_bounty_id) - <= T::MaxActiveChildBountyCount::get() as u32, + Self::parent_child_bounties(parent_bounty_id) <= + T::MaxActiveChildBountyCount::get() as u32, Error::::TooManyChildBounties, ); @@ -483,7 +483,7 @@ pub mod pallet { match child_bounty.status { ChildBountyStatus::Added => { // No curator to unassign at this point. - return Err(BountiesError::::UnexpectedStatus.into()); + return Err(BountiesError::::UnexpectedStatus.into()) }, ChildBountyStatus::CuratorProposed { ref curator } => { // A child-bounty curator has been proposed, but not accepted yet. @@ -491,8 +491,8 @@ pub mod pallet { // child-bounty curator can unassign the child-bounty curator. 
ensure!( maybe_sender.map_or(true, |sender| { - sender == *curator - || Self::ensure_bounty_active(parent_bounty_id) + sender == *curator || + Self::ensure_bounty_active(parent_bounty_id) .map_or(false, |(parent_curator, _)| { sender == parent_curator }) @@ -521,8 +521,8 @@ pub mod pallet { Some(sender) => { let (parent_curator, update_due) = Self::ensure_bounty_active(parent_bounty_id)?; - if sender == parent_curator - || update_due < frame_system::Pallet::::block_number() + if sender == parent_curator || + update_due < frame_system::Pallet::::block_number() { // Slash the child-bounty curator if // + the call is made by the parent bounty curator. @@ -531,7 +531,7 @@ pub mod pallet { // Continue to change bounty status below. } else { // Curator has more time to give an update. - return Err(BountiesError::::Premature.into()); + return Err(BountiesError::::Premature.into()) } }, } @@ -600,8 +600,8 @@ pub mod pallet { child_bounty.status = ChildBountyStatus::PendingPayout { curator: signer, beneficiary: beneficiary.clone(), - unlock_at: frame_system::Pallet::::block_number() - + T::BountyDepositPayoutDelay::get(), + unlock_at: frame_system::Pallet::::block_number() + + T::BountyDepositPayoutDelay::get(), }; Ok(()) } else { @@ -775,7 +775,7 @@ impl Pallet { bounty_fee: &BalanceOf, ) -> BalanceOf { if parent_curator == child_curator { - return Zero::zero(); + return Zero::zero() } // We just use the same logic from the parent bounties pallet. @@ -847,7 +847,7 @@ impl Pallet { // child-bounty, it should mean the child-bounty curator // was acting maliciously. So first unassign the // child-bounty curator, slashing their deposit. 
- return Err(BountiesError::::PendingPayout.into()); + return Err(BountiesError::::PendingPayout.into()) }, } diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 01c8e1ae0b381..06d5b1fab78e7 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -819,7 +819,7 @@ impl, I: 'static> Pallet { if position_yes.is_none() { voting.ayes.push(who.clone()); } else { - return Err(Error::::DuplicateVote.into()); + return Err(Error::::DuplicateVote.into()) } if let Some(pos) = position_no { voting.nays.swap_remove(pos); @@ -828,7 +828,7 @@ impl, I: 'static> Pallet { if position_no.is_none() { voting.nays.push(who.clone()); } else { - return Err(Error::::DuplicateVote.into()); + return Err(Error::::DuplicateVote.into()) } if let Some(pos) = position_yes { voting.ayes.swap_remove(pos); @@ -882,7 +882,7 @@ impl, I: 'static> Pallet { ), Pays::Yes, ) - .into()); + .into()) } else if disapproved { Self::deposit_event(Event::Closed { proposal_hash, yes: yes_votes, no: no_votes }); let proposal_count = Self::do_disapprove_proposal(proposal_hash); @@ -890,7 +890,7 @@ impl, I: 'static> Pallet { Some(T::WeightInfo::close_early_disapproved(seats, proposal_count)), Pays::No, ) - .into()); + .into()) } // Only allow actual closing of the proposal after the voting period has ended. 
diff --git a/frame/collective/src/migrations/v4.rs b/frame/collective/src/migrations/v4.rs index 8729066502c9d..483c3f9fa9e69 100644 --- a/frame/collective/src/migrations/v4.rs +++ b/frame/collective/src/migrations/v4.rs @@ -45,7 +45,7 @@ pub fn migrate::on_chain_storage_version(); @@ -84,7 +84,7 @@ pub fn pre_migrate>(old_p log_migration("pre-migration", old_pallet_name, new_pallet_name); if new_pallet_name == old_pallet_name { - return; + return } let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); @@ -112,7 +112,7 @@ pub fn post_migrate>(old_ log_migration("post-migration", old_pallet_name, new_pallet_name); if new_pallet_name == old_pallet_name { - return; + return } // Assert that nothing remains at the old prefix. diff --git a/frame/contracts/primitives/src/lib.rs b/frame/contracts/primitives/src/lib.rs index 7ca42ac2c8155..4faea9eb3ee75 100644 --- a/frame/contracts/primitives/src/lib.rs +++ b/frame/contracts/primitives/src/lib.rs @@ -200,20 +200,18 @@ where match (self, rhs) { (Charge(lhs), Charge(rhs)) => Charge(lhs.saturating_add(*rhs)), (Refund(lhs), Refund(rhs)) => Refund(lhs.saturating_add(*rhs)), - (Charge(lhs), Refund(rhs)) => { + (Charge(lhs), Refund(rhs)) => if lhs >= rhs { Charge(lhs.saturating_sub(*rhs)) } else { Refund(rhs.saturating_sub(*lhs)) - } - }, - (Refund(lhs), Charge(rhs)) => { + }, + (Refund(lhs), Charge(rhs)) => if lhs > rhs { Refund(lhs.saturating_sub(*rhs)) } else { Charge(rhs.saturating_sub(*lhs)) - } - }, + }, } } @@ -223,20 +221,18 @@ where match (self, rhs) { (Charge(lhs), Refund(rhs)) => Charge(lhs.saturating_add(*rhs)), (Refund(lhs), Charge(rhs)) => Refund(lhs.saturating_add(*rhs)), - (Charge(lhs), Charge(rhs)) => { + (Charge(lhs), Charge(rhs)) => if lhs >= rhs { Charge(lhs.saturating_sub(*rhs)) } else { Refund(rhs.saturating_sub(*lhs)) - } - }, - (Refund(lhs), Refund(rhs)) => { + }, + (Refund(lhs), Refund(rhs)) => if lhs > rhs { Refund(lhs.saturating_sub(*rhs)) } else { Charge(rhs.saturating_sub(*lhs)) - } - }, + }, 
} } diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs index 7a3e4dad093f7..399a1b413f121 100644 --- a/frame/contracts/proc-macro/src/lib.rs +++ b/frame/contracts/proc-macro/src/lib.rs @@ -60,7 +60,7 @@ fn derive_debug(input: TokenStream, fmt: impl Fn(&Ident) -> TokenStream2) -> Tok name.span() => compile_error!("WeightDebug is only supported for structs."); } - .into(); + .into() }; #[cfg(feature = "full")] @@ -97,7 +97,7 @@ fn iterate_fields(data: &syn::DataStruct, fmt: impl Fn(&Ident) -> TokenStream2) let recurse = fields.named.iter().filter_map(|f| { let name = f.ident.as_ref()?; if name.to_string().starts_with('_') { - return None; + return None } let value = fmt(name); let ret = quote_spanned! { f.span() => @@ -247,7 +247,7 @@ impl HostFn { match &result.arguments { syn::PathArguments::AngleBracketed(group) => { if group.args.len() != 2 { - return Err(err(span, &msg)); + return Err(err(span, &msg)) }; let arg2 = group.args.last().ok_or(err(span, &msg))?; @@ -286,7 +286,7 @@ impl HostFn { .to_string()), syn::Type::Tuple(tt) => { if !tt.elems.is_empty() { - return Err(err(arg1.span(), &msg)); + return Err(err(arg1.span(), &msg)) }; Ok("()".to_string()) }, diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index a6b1429257df7..c1e9f3208b286 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -497,12 +497,10 @@ pub mod body { DynInstr::RandomI32(low, high) => { vec![Instruction::I32Const(rng.gen_range(*low..*high))] }, - DynInstr::RandomI32Repeated(num) => { - (&mut rng).sample_iter(Standard).take(*num).map(Instruction::I32Const).collect() - }, - DynInstr::RandomI64Repeated(num) => { - (&mut rng).sample_iter(Standard).take(*num).map(Instruction::I64Const).collect() - }, + DynInstr::RandomI32Repeated(num) => + (&mut rng).sample_iter(Standard).take(*num).map(Instruction::I32Const).collect(), + DynInstr::RandomI64Repeated(num) => 
+ (&mut rng).sample_iter(Standard).take(*num).map(Instruction::I64Const).collect(), DynInstr::RandomGetLocal(low, high) => { vec![Instruction::GetLocal(rng.gen_range(*low..*high))] }, diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index 692c78497e880..2494a4cbebd55 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -165,16 +165,16 @@ where /// Returns `true` iff all storage entries related to code storage exist. fn code_exists(hash: &CodeHash) -> bool { - >::contains_key(hash) - && >::contains_key(&hash) - && >::contains_key(&hash) + >::contains_key(hash) && + >::contains_key(&hash) && + >::contains_key(&hash) } /// Returns `true` iff no storage entry related to code storage exist. fn code_removed(hash: &CodeHash) -> bool { - !>::contains_key(hash) - && !>::contains_key(&hash) - && !>::contains_key(&hash) + !>::contains_key(hash) && + !>::contains_key(&hash) && + !>::contains_key(&hash) } } diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 83b8c9e9ded73..2884779d8fda7 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -758,11 +758,11 @@ where // `AllowIndeterminism` will only be ever set in case of off-chain execution. // Instantiations are never allowed even when executing off-chain. 
- if !(executable.is_deterministic() - || (matches!(determinism, Determinism::AllowIndeterminism) - && matches!(entry_point, ExportedFunction::Call))) + if !(executable.is_deterministic() || + (matches!(determinism, Determinism::AllowIndeterminism) && + matches!(entry_point, ExportedFunction::Call))) { - return Err(Error::::Indeterministic.into()); + return Err(Error::::Indeterministic.into()) } let frame = Frame { @@ -787,7 +787,7 @@ where gas_limit: Weight, ) -> Result { if self.frames.len() == T::CallStack::size() { - return Err(Error::::MaxCallDepthReached.into()); + return Err(Error::::MaxCallDepthReached.into()) } // We need to make sure that changes made to the contract info are not discarded. @@ -847,7 +847,7 @@ where // Avoid useless work that would be reverted anyways. if output.did_revert() { - return Ok(output); + return Ok(output) } // Storage limit is enforced as late as possible (when the last frame returns) so that @@ -865,7 +865,7 @@ where (ExportedFunction::Constructor, _) => { // It is not allowed to terminate a contract inside its constructor. if matches!(frame.contract_info, CachedContract::Terminated) { - return Err(Error::::TerminatedInConstructor.into()); + return Err(Error::::TerminatedInConstructor.into()) } // Deposit an instantiation event. @@ -905,9 +905,8 @@ where with_transaction(|| -> TransactionOutcome> { let output = do_transaction(); match &output { - Ok(result) if !result.did_revert() => { - TransactionOutcome::Commit(Ok((true, output))) - }, + Ok(result) if !result.did_revert() => + TransactionOutcome::Commit(Ok((true, output))), _ => TransactionOutcome::Rollback(Ok((false, output))), } }); @@ -949,7 +948,7 @@ where // Only gas counter changes are persisted in case of a failure. if !persist { - return; + return } // Record the storage meter changes of the nested call into the parent meter. @@ -968,7 +967,7 @@ where // trigger a rollback. 
if prev.account_id == *account_id { prev.contract_info = CachedContract::Cached(contract); - return; + return } // Predecessor is a different contract: We persist the info and invalidate the first @@ -991,7 +990,7 @@ where } self.gas_meter.absorb_nested(mem::take(&mut self.first_frame.nested_gas)); if !persist { - return; + return } let mut contract = self.first_frame.contract_info.as_contract(); self.storage_meter.absorb( @@ -1027,7 +1026,7 @@ where // If it is a delegate call, then we've already transferred tokens in the // last non-delegate frame. if frame.delegate_caller.is_some() { - return Ok(()); + return Ok(()) } let value = frame.value_transferred; @@ -1107,7 +1106,7 @@ where let try_call = || { if !self.allows_reentry(&to) { - return Err(>::ReentranceDenied.into()); + return Err(>::ReentranceDenied.into()) } // We ignore instantiate frames in our search for a cached contract. // Otherwise it would be possible to recursively call a contract from its own @@ -1184,7 +1183,7 @@ where fn terminate(&mut self, beneficiary: &AccountIdOf) -> Result<(), DispatchError> { if self.is_recursive() { - return Err(Error::::TerminatedWhileReentrant.into()); + return Err(Error::::TerminatedWhileReentrant.into()) } let frame = self.top_frame_mut(); let info = frame.terminate(); @@ -1368,7 +1367,7 @@ where fn set_code_hash(&mut self, hash: CodeHash) -> Result<(), DispatchError> { let frame = top_frame_mut!(self); if !E::from_storage(hash, self.schedule, &mut frame.nested_gas)?.is_deterministic() { - return Err(>::Indeterministic.into()); + return Err(>::Indeterministic.into()) } E::add_user(hash)?; let prev_hash = frame.contract_info().code_hash; diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index cf8bd4b6507bf..00b0655ea4af6 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -516,7 +516,7 @@ pub mod pallet { let contract = if let Some(contract) = contract { contract } else { - return Err(>::ContractNotFound.into()); + return 
Err(>::ContractNotFound.into()) }; >::add_user(code_hash)?; >::remove_user(contract.code_hash); @@ -1096,13 +1096,12 @@ where let mut gas_meter = GasMeter::new(gas_limit); let mut storage_meter = match StorageMeter::new(&origin, storage_deposit_limit, value) { Ok(meter) => meter, - Err(err) => { + Err(err) => return InternalCallOutput { result: Err(err.into()), gas_meter, storage_deposit: Default::default(), - } - }, + }, }; let schedule = T::Schedule::get(); let result = ExecStack::>::run_call( diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index bb673b2e7b1c2..aa04d8b9b1084 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -420,7 +420,7 @@ mod post_checks { pub fn post_upgrade(old_version: StorageVersion) -> Result<(), &'static str> { if old_version < 7 { - return Ok(()); + return Ok(()) } if old_version < 8 { diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index edf175a6727b7..79f9f49e58190 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -709,25 +709,25 @@ impl<'a, T: Config> gas_metering::Rules for ScheduleRules<'a, T> { let weight = match *instruction { End | Unreachable | Return | Else => 0, I32Const(_) | I64Const(_) | Block(_) | Loop(_) | Nop | Drop => w.i64const, - I32Load(_, _) - | I32Load8S(_, _) - | I32Load8U(_, _) - | I32Load16S(_, _) - | I32Load16U(_, _) - | I64Load(_, _) - | I64Load8S(_, _) - | I64Load8U(_, _) - | I64Load16S(_, _) - | I64Load16U(_, _) - | I64Load32S(_, _) - | I64Load32U(_, _) => w.i64load, - I32Store(_, _) - | I32Store8(_, _) - | I32Store16(_, _) - | I64Store(_, _) - | I64Store8(_, _) - | I64Store16(_, _) - | I64Store32(_, _) => w.i64store, + I32Load(_, _) | + I32Load8S(_, _) | + I32Load8U(_, _) | + I32Load16S(_, _) | + I32Load16U(_, _) | + I64Load(_, _) | + I64Load8S(_, _) | + I64Load8U(_, _) | + I64Load16S(_, _) | + I64Load16U(_, _) | + I64Load32S(_, _) | + I64Load32U(_, _) => w.i64load, + 
I32Store(_, _) | + I32Store8(_, _) | + I32Store16(_, _) | + I64Store(_, _) | + I64Store8(_, _) | + I64Store16(_, _) | + I64Store32(_, _) => w.i64store, Select => w.select, If(_) => w.r#if, Br(_) => w.br, @@ -780,9 +780,8 @@ impl<'a, T: Config> gas_metering::Rules for ScheduleRules<'a, T> { // Returning None makes the gas instrumentation fail which we intend for // unsupported or unknown instructions. Offchain we might allow indeterminism and hence // use the fallback weight for those instructions. - _ if matches!(self.determinism, Determinism::AllowIndeterminism) && w.fallback > 0 => { - w.fallback - }, + _ if matches!(self.determinism, Determinism::AllowIndeterminism) && w.fallback > 0 => + w.fallback, _ => return None, }; Some(weight) diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 7834fa8ba2a5e..c7644e696196f 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -182,13 +182,12 @@ where if let Some(storage_meter) = storage_meter { let mut diff = meter::Diff::default(); match (old_len, new_value.as_ref().map(|v| v.len() as u32)) { - (Some(old_len), Some(new_len)) => { + (Some(old_len), Some(new_len)) => if new_len > old_len { diff.bytes_added = new_len - old_len; } else { diff.bytes_removed = old_len - new_len; - } - }, + }, (None, Some(new_len)) => { diff.bytes_added = new_len; diff.items_added = 1; @@ -224,7 +223,7 @@ where code_hash: CodeHash, ) -> Result, DispatchError> { if >::contains_key(account) { - return Err(Error::::DuplicateContract.into()); + return Err(Error::::DuplicateContract.into()) } let contract = ContractInfo:: { @@ -253,10 +252,10 @@ where /// and weight limit. 
pub fn deletion_budget(queue_len: usize, weight_limit: Weight) -> (u64, u32) { let base_weight = T::WeightInfo::on_process_deletion_queue_batch(); - let weight_per_queue_item = T::WeightInfo::on_initialize_per_queue_item(1) - - T::WeightInfo::on_initialize_per_queue_item(0); - let weight_per_key = (T::WeightInfo::on_initialize_per_trie_key(1) - - T::WeightInfo::on_initialize_per_trie_key(0)) + let weight_per_queue_item = T::WeightInfo::on_initialize_per_queue_item(1) - + T::WeightInfo::on_initialize_per_queue_item(0); + let weight_per_key = (T::WeightInfo::on_initialize_per_trie_key(1) - + T::WeightInfo::on_initialize_per_trie_key(0)) .ref_time(); let decoding_weight = weight_per_queue_item.saturating_mul(queue_len as u64); @@ -278,7 +277,7 @@ where pub fn process_deletion_queue_batch(weight_limit: Weight) -> Weight { let queue_len = >::decode_len().unwrap_or(0); if queue_len == 0 { - return Weight::zero(); + return Weight::zero() } let (weight_per_key, mut remaining_key_budget) = @@ -288,7 +287,7 @@ where // proceeding. Too little weight for decoding might happen during runtime upgrades // which consume the whole block before the other `on_initialize` blocks are called. 
if remaining_key_budget == 0 { - return weight_limit; + return weight_limit } let mut queue = >::get(); diff --git a/frame/contracts/src/storage/meter.rs b/frame/contracts/src/storage/meter.rs index 3b1f52eeb15c3..0a63eb42b86cb 100644 --- a/frame/contracts/src/storage/meter.rs +++ b/frame/contracts/src/storage/meter.rs @@ -159,7 +159,7 @@ impl Diff { } else { debug_assert_eq!(self.bytes_removed, 0); debug_assert_eq!(self.items_removed, 0); - return bytes_deposit.saturating_add(&items_deposit); + return bytes_deposit.saturating_add(&items_deposit) }; // Refunds are calculated pro rata based on the accumulated storage within the contract @@ -182,20 +182,16 @@ impl Diff { info.storage_items = info.storage_items.saturating_add(items_added).saturating_sub(items_removed); match &bytes_deposit { - Deposit::Charge(amount) => { - info.storage_byte_deposit = info.storage_byte_deposit.saturating_add(*amount) - }, - Deposit::Refund(amount) => { - info.storage_byte_deposit = info.storage_byte_deposit.saturating_sub(*amount) - }, + Deposit::Charge(amount) => + info.storage_byte_deposit = info.storage_byte_deposit.saturating_add(*amount), + Deposit::Refund(amount) => + info.storage_byte_deposit = info.storage_byte_deposit.saturating_sub(*amount), } match &items_deposit { - Deposit::Charge(amount) => { - info.storage_item_deposit = info.storage_item_deposit.saturating_add(*amount) - }, - Deposit::Refund(amount) => { - info.storage_item_deposit = info.storage_item_deposit.saturating_sub(*amount) - }, + Deposit::Charge(amount) => + info.storage_item_deposit = info.storage_item_deposit.saturating_add(*amount), + Deposit::Refund(amount) => + info.storage_item_deposit = info.storage_item_deposit.saturating_sub(*amount), } bytes_deposit.saturating_add(&items_deposit) @@ -395,7 +391,7 @@ where // contract's account into existence. 
deposit = deposit.max(Deposit::Charge(Pallet::::min_balance())); if deposit.charge_or_zero() > self.limit { - return Err(>::StorageDepositLimitExhausted.into()); + return Err(>::StorageDepositLimitExhausted.into()) } // We do not increase `own_contribution` because this will be charged later when the @@ -438,7 +434,7 @@ where } if let Deposit::Charge(amount) = total_deposit { if amount > self.limit { - return Err(>::StorageDepositLimitExhausted.into()); + return Err(>::StorageDepositLimitExhausted.into()) } } Ok(()) @@ -458,8 +454,8 @@ where let max = T::Currency::reducible_balance(origin, true).saturating_sub(min_leftover); let limit = limit.unwrap_or(max); ensure!( - limit <= max - && matches!(T::Currency::can_withdraw(origin, limit), WithdrawConsequence::Success), + limit <= max && + matches!(T::Currency::can_withdraw(origin, limit), WithdrawConsequence::Success), >::StorageDepositNotEnoughFunds, ); Ok(limit) diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 29c6f7a5de860..e7b27ed38e271 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -2514,8 +2514,8 @@ fn reinstrument_does_charge() { assert!(result2.gas_consumed.ref_time() > result1.gas_consumed.ref_time()); assert_eq!( result2.gas_consumed.ref_time(), - result1.gas_consumed.ref_time() - + ::WeightInfo::reinstrument(code_len).ref_time(), + result1.gas_consumed.ref_time() + + ::WeightInfo::reinstrument(code_len).ref_time(), ); }); } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 4ceea222384f3..86bc377b81307 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -514,9 +514,8 @@ mod tests { let entry = self.storage.entry(key.clone()); let result = match (entry, take_old) { (Entry::Vacant(_), _) => WriteOutcome::New, - (Entry::Occupied(entry), false) => { - WriteOutcome::Overwritten(entry.remove().len() as u32) - }, + (Entry::Occupied(entry), false) => + 
WriteOutcome::Overwritten(entry.remove().len() as u32), (Entry::Occupied(entry), true) => WriteOutcome::Taken(entry.remove()), }; if let Some(value) = value { @@ -534,9 +533,8 @@ mod tests { let entry = self.storage.entry(key.clone()); let result = match (entry, take_old) { (Entry::Vacant(_), _) => WriteOutcome::New, - (Entry::Occupied(entry), false) => { - WriteOutcome::Overwritten(entry.remove().len() as u32) - }, + (Entry::Occupied(entry), false) => + WriteOutcome::Overwritten(entry.remove().len() as u32), (Entry::Occupied(entry), true) => WriteOutcome::Taken(entry.remove()), }; if let Some(value) = value { diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 95c6bb66a43cd..fb5ae1229078f 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -81,7 +81,7 @@ impl<'a, T: Config> ContractModule<'a, T> { /// we reject such a module. fn ensure_no_internal_memory(&self) -> Result<(), &'static str> { if self.module.memory_section().map_or(false, |ms| ms.entries().len() > 0) { - return Err("module declares internal memory"); + return Err("module declares internal memory") } Ok(()) } @@ -92,13 +92,13 @@ impl<'a, T: Config> ContractModule<'a, T> { // In Wasm MVP spec, there may be at most one table declared. Double check this // explicitly just in case the Wasm version changes. if table_section.entries().len() > 1 { - return Err("multiple tables declared"); + return Err("multiple tables declared") } if let Some(table_type) = table_section.entries().first() { // Check the table's initial size as there is no instruction or environment function // capable of growing the table. 
if table_type.limits().initial() > limit { - return Err("table exceeds maximum size allowed"); + return Err("table exceeds maximum size allowed") } } } @@ -110,13 +110,13 @@ impl<'a, T: Config> ContractModule<'a, T> { let code_section = if let Some(type_section) = self.module.code_section() { type_section } else { - return Ok(()); + return Ok(()) }; for instr in code_section.bodies().iter().flat_map(|body| body.code().elements()) { use self::elements::Instruction::BrTable; if let BrTable(table) = instr { if table.table.len() > limit as usize { - return Err("BrTable's immediate value is too big."); + return Err("BrTable's immediate value is too big.") } } } @@ -126,7 +126,7 @@ impl<'a, T: Config> ContractModule<'a, T> { fn ensure_global_variable_limit(&self, limit: u32) -> Result<(), &'static str> { if let Some(global_section) = self.module.global_section() { if global_section.entries().len() > limit as usize { - return Err("module declares too many globals"); + return Err("module declares too many globals") } } Ok(()) @@ -137,9 +137,8 @@ impl<'a, T: Config> ContractModule<'a, T> { if let Some(global_section) = self.module.global_section() { for global in global_section.entries() { match global.global_type().content_type() { - ValueType::F32 | ValueType::F64 => { - return Err("use of floating point type in globals is forbidden") - }, + ValueType::F32 | ValueType::F64 => + return Err("use of floating point type in globals is forbidden"), _ => {}, } } @@ -149,9 +148,8 @@ impl<'a, T: Config> ContractModule<'a, T> { for func_body in code_section.bodies() { for local in func_body.locals() { match local.value_type() { - ValueType::F32 | ValueType::F64 => { - return Err("use of floating point type in locals is forbidden") - }, + ValueType::F32 | ValueType::F64 => + return Err("use of floating point type in locals is forbidden"), _ => {}, } } @@ -165,11 +163,10 @@ impl<'a, T: Config> ContractModule<'a, T> { let return_type = func_type.results().get(0); for value_type in 
func_type.params().iter().chain(return_type) { match value_type { - ValueType::F32 | ValueType::F64 => { + ValueType::F32 | ValueType::F64 => return Err( "use of floating point type in function types is forbidden", - ) - }, + ), _ => {}, } } @@ -186,12 +183,12 @@ impl<'a, T: Config> ContractModule<'a, T> { let type_section = if let Some(type_section) = self.module.type_section() { type_section } else { - return Ok(()); + return Ok(()) }; for Type::Function(func) in type_section.types() { if func.params().len() > limit as usize { - return Err("Use of a function type with too many parameters."); + return Err("Use of a function type with too many parameters.") } } @@ -263,7 +260,7 @@ impl<'a, T: Config> ContractModule<'a, T> { Some(fn_idx) => fn_idx, None => { // Underflow here means fn_idx points to imported function which we don't allow! - return Err("entry point points to an imported function"); + return Err("entry point points to an imported function") }, }; @@ -276,18 +273,18 @@ impl<'a, T: Config> ContractModule<'a, T> { .type_ref(); let Type::Function(ref func_ty) = types.get(func_ty_idx as usize).ok_or("function has a non-existent type")?; - if !(func_ty.params().is_empty() - && (func_ty.results().is_empty() || func_ty.results() == [ValueType::I32])) + if !(func_ty.params().is_empty() && + (func_ty.results().is_empty() || func_ty.results() == [ValueType::I32])) { - return Err("entry point has wrong signature"); + return Err("entry point has wrong signature") } } if !deploy_found { - return Err("deploy function isn't exported"); + return Err("deploy function isn't exported") } if !call_found { - return Err("call function isn't exported"); + return Err("call function isn't exported") } Ok(()) @@ -324,16 +321,16 @@ impl<'a, T: Config> ContractModule<'a, T> { }, External::Memory(ref memory_type) => { if import.module() != IMPORT_MODULE_MEMORY { - return Err("Invalid module for imported memory"); + return Err("Invalid module for imported memory") } if 
import.field() != "memory" { - return Err("Memory import must have the field name 'memory'"); + return Err("Memory import must have the field name 'memory'") } if imported_mem_type.is_some() { - return Err("Multiple memory imports defined"); + return Err("Multiple memory imports defined") } imported_mem_type = Some(memory_type); - continue; + continue }, } } @@ -353,12 +350,10 @@ fn get_memory_limits( // Inspect the module to extract the initial and maximum page count. let limits = memory_type.limits(); match (limits.initial(), limits.maximum()) { - (initial, Some(maximum)) if initial > maximum => { - Err("Requested initial number of pages should not exceed the requested maximum") - }, - (_, Some(maximum)) if maximum > schedule.limits.memory_pages => { - Err("Maximum number of pages should not exceed the configured maximum.") - }, + (initial, Some(maximum)) if initial > maximum => + Err("Requested initial number of pages should not exceed the requested maximum"), + (_, Some(maximum)) if maximum > schedule.limits.memory_pages => + Err("Maximum number of pages should not exceed the configured maximum."), (initial, Some(maximum)) => Ok((initial, maximum)), (_, None) => { // Maximum number of pages should be always declared. 
diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 28dd064b703d5..4c6006d2612fe 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -314,9 +314,8 @@ impl RuntimeCosts { ContainsStorage(len) => s .contains_storage .saturating_add(s.contains_storage_per_byte.saturating_mul(len.into())), - GetStorage(len) => { - s.get_storage.saturating_add(s.get_storage_per_byte.saturating_mul(len.into())) - }, + GetStorage(len) => + s.get_storage.saturating_add(s.get_storage_per_byte.saturating_mul(len.into())), #[cfg(feature = "unstable-interface")] TakeStorage(len) => s .take_storage @@ -661,14 +660,14 @@ where create_token: impl FnOnce(u32) -> Option, ) -> Result<(), DispatchError> { if allow_skip && out_ptr == SENTINEL { - return Ok(()); + return Ok(()) } let buf_len = buf.len() as u32; let len: u32 = self.read_sandbox_memory_as(memory, out_len_ptr)?; if len < buf_len { - return Err(Error::::OutputBufferTooSmall.into()); + return Err(Error::::OutputBufferTooSmall.into()) } if let Some(costs) = create_token(buf_len) { @@ -773,7 +772,7 @@ where let charged = self .charge_gas(RuntimeCosts::SetStorage { new_bytes: value_len, old_bytes: max_size })?; if value_len > max_size { - return Err(Error::::ValueTooLarge.into()); + return Err(Error::::ValueTooLarge.into()) } let key = self.read_sandbox_memory(memory, key_ptr, key_type.len::()?)?; let value = Some(self.read_sandbox_memory(memory, value_ptr, value_len)?); @@ -920,7 +919,7 @@ where }, CallType::DelegateCall { code_hash_ptr } => { if flags.contains(CallFlags::ALLOW_REENTRY) { - return Err(Error::::InvalidCallFlags.into()); + return Err(Error::::InvalidCallFlags.into()) } let code_hash = self.read_sandbox_memory_as(memory, code_hash_ptr)?; self.ext.delegate_call(code_hash, input_data) @@ -934,7 +933,7 @@ where return Err(TrapReason::Return(ReturnData { flags: return_value.flags.bits(), data: return_value.data, - })); + })) } } @@ -1913,7 +1912,7 @@ 
pub mod env { ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Random)?; if subject_len > ctx.ext.schedule().limits.subject_len { - return Err(Error::::RandomSubjectTooLong.into()); + return Err(Error::::RandomSubjectTooLong.into()) } let subject_buf = ctx.read_sandbox_memory(memory, subject_ptr, subject_len)?; Ok(ctx.write_sandbox_output( @@ -1959,7 +1958,7 @@ pub mod env { ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Random)?; if subject_len > ctx.ext.schedule().limits.subject_len { - return Err(Error::::RandomSubjectTooLong.into()); + return Err(Error::::RandomSubjectTooLong.into()) } let subject_buf = ctx.read_sandbox_memory(memory, subject_ptr, subject_len)?; Ok(ctx.write_sandbox_output( @@ -2116,7 +2115,7 @@ pub mod env { .ok_or("Zero sized topics are not allowed")?; ctx.charge_gas(RuntimeCosts::DepositEvent { num_topic, len: data_len })?; if data_len > ctx.ext.max_value_size() { - return Err(Error::::ValueTooLarge.into()); + return Err(Error::::ValueTooLarge.into()) } let mut topics: Vec::T>> = match topics_len { @@ -2126,14 +2125,14 @@ pub mod env { // If there are more than `event_topics`, then trap. if topics.len() > ctx.ext.schedule().limits.event_topics as usize { - return Err(Error::::TooManyTopics.into()); + return Err(Error::::TooManyTopics.into()) } // Check for duplicate topics. If there are any, then trap. // Complexity O(n * log(n)) and no additional allocations. // This also sorts the topics. 
if has_duplicates(&mut topics) { - return Err(Error::::DuplicateTopics.into()); + return Err(Error::::DuplicateTopics.into()) } let event_data = ctx.read_sandbox_memory(memory, data_ptr, data_len)?; @@ -2359,7 +2358,7 @@ pub mod env { ) -> Result { use crate::chain_extension::{ChainExtension, Environment, RetVal}; if !::ChainExtension::enabled() { - return Err(Error::::NoChainExtension.into()); + return Err(Error::::NoChainExtension.into()) } let mut chain_extension = ctx.chain_extension.take().expect( "Constructor initializes with `Some`. This is the only place where it is set to `None`.\ @@ -2369,9 +2368,8 @@ pub mod env { Environment::new(ctx, memory, id, input_ptr, input_len, output_ptr, output_len_ptr); let ret = match chain_extension.call(env)? { RetVal::Converging(val) => Ok(val), - RetVal::Diverging { flags, data } => { - Err(TrapReason::Return(ReturnData { flags: flags.bits(), data })) - }, + RetVal::Diverging { flags, data } => + Err(TrapReason::Return(ReturnData { flags: flags.bits(), data })), }; ctx.chain_extension = Some(chain_extension); ret @@ -2407,7 +2405,7 @@ pub mod env { let msg = core::str::from_utf8(&data).map_err(|_| >::DebugMessageInvalidUTF8)?; ctx.ext.append_debug_buffer(msg); - return Ok(ReturnCode::Success); + return Ok(ReturnCode::Success) } Ok(ReturnCode::LoggingDisabled) } diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index 708fc413d7bcb..3ecc6e56be94e 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -409,7 +409,7 @@ impl, I: 'static> Pallet { tally.increase(approve, *delegations); } } else { - return Err(Error::::AlreadyDelegating.into()); + return Err(Error::::AlreadyDelegating.into()) } // Extend the lock to `balance` (rather than setting it) since we don't know what // other votes are in place. @@ -557,9 +557,8 @@ impl, I: 'static> Pallet { }), ); match old { - Voting::Delegating(Delegating { .. 
}) => { - return Err(Error::::AlreadyDelegating.into()) - }, + Voting::Delegating(Delegating { .. }) => + return Err(Error::::AlreadyDelegating.into()), Voting::Casting(Casting { votes, delegations, prior }) => { // here we just ensure that we're currently idling with no votes recorded. ensure!(votes.is_empty(), Error::::AlreadyVoting); diff --git a/frame/conviction-voting/src/tests.rs b/frame/conviction-voting/src/tests.rs index ed65fdf8fa03b..7a3f80442014a 100644 --- a/frame/conviction-voting/src/tests.rs +++ b/frame/conviction-voting/src/tests.rs @@ -139,9 +139,8 @@ impl Polling> for TestPolls { let mut polls = Polls::get(); let entry = polls.get_mut(&index); let r = match entry { - Some(Ongoing(ref mut tally_mut_ref, class)) => { - f(PollStatus::Ongoing(tally_mut_ref, *class)) - }, + Some(Ongoing(ref mut tally_mut_ref, class)) => + f(PollStatus::Ongoing(tally_mut_ref, *class)), Some(Completed(when, succeeded)) => f(PollStatus::Completed(*when, *succeeded)), None => f(PollStatus::None), }; @@ -155,9 +154,8 @@ impl Polling> for TestPolls { let mut polls = Polls::get(); let entry = polls.get_mut(&index); let r = match entry { - Some(Ongoing(ref mut tally_mut_ref, class)) => { - f(PollStatus::Ongoing(tally_mut_ref, *class)) - }, + Some(Ongoing(ref mut tally_mut_ref, class)) => + f(PollStatus::Ongoing(tally_mut_ref, *class)), Some(Completed(when, succeeded)) => f(PollStatus::Completed(*when, *succeeded)), None => f(PollStatus::None), }?; diff --git a/frame/conviction-voting/src/vote.rs b/frame/conviction-voting/src/vote.rs index f8b9e57bdcda5..a8e012b6c97a1 100644 --- a/frame/conviction-voting/src/vote.rs +++ b/frame/conviction-voting/src/vote.rs @@ -83,9 +83,8 @@ impl AccountVote { // winning side: can only be removed after the lock period ends. match self { AccountVote::Standard { vote: Vote { conviction: Conviction::None, .. }, .. 
} => None, - AccountVote::Standard { vote, balance } if vote.aye == approved => { - Some((vote.conviction.lock_periods(), balance)) - }, + AccountVote::Standard { vote, balance } if vote.aye == approved => + Some((vote.conviction.lock_periods(), balance)), _ => None, } } @@ -237,9 +236,8 @@ where /// The amount of this account's balance that must currently be locked due to voting. pub fn locked_balance(&self) -> Balance { match self { - Voting::Casting(Casting { votes, prior, .. }) => { - votes.iter().map(|i| i.1.balance()).fold(prior.locked(), |a, i| a.max(i)) - }, + Voting::Casting(Casting { votes, prior, .. }) => + votes.iter().map(|i| i.1.balance()).fold(prior.locked(), |a, i| a.max(i)), Voting::Delegating(Delegating { balance, prior, .. }) => *balance.max(&prior.locked()), } } @@ -250,12 +248,10 @@ where prior: PriorLock, ) { let (d, p) = match self { - Voting::Casting(Casting { ref mut delegations, ref mut prior, .. }) => { - (delegations, prior) - }, - Voting::Delegating(Delegating { ref mut delegations, ref mut prior, .. }) => { - (delegations, prior) - }, + Voting::Casting(Casting { ref mut delegations, ref mut prior, .. }) => + (delegations, prior), + Voting::Delegating(Delegating { ref mut delegations, ref mut prior, .. 
}) => + (delegations, prior), }; *d = delegations; *p = prior; diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 7626fcd362b11..cf954d4800eee 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -790,7 +790,7 @@ pub mod pallet { if let Some((ext_proposal, _)) = NextExternal::::get() { ensure!(proposal_hash == ext_proposal.hash(), Error::::ProposalMissing); } else { - return Err(Error::::NoProposal.into()); + return Err(Error::::NoProposal.into()) } let mut existing_vetoers = @@ -1422,7 +1422,7 @@ impl Pallet { ); Ok(()) } else { - return Err(Error::::NoneWaiting.into()); + return Err(Error::::NoneWaiting.into()) } } @@ -1451,7 +1451,7 @@ impl Pallet { } Ok(()) } else { - return Err(Error::::NoneWaiting.into()); + return Err(Error::::NoneWaiting.into()) } } @@ -1541,8 +1541,8 @@ impl Pallet { // of unbaked referendum is bounded by this number. In case those number have changed in a // runtime upgrade the formula should be adjusted but the bound should still be sensible. >::mutate(|ref_index| { - while *ref_index < last - && Self::referendum_info(*ref_index) + while *ref_index < last && + Self::referendum_info(*ref_index) .map_or(true, |info| matches!(info, ReferendumInfo::Finished { .. 
})) { *ref_index += 1 diff --git a/frame/democracy/src/migrations.rs b/frame/democracy/src/migrations.rs index 09754c65fea09..3ec249c1d981c 100644 --- a/frame/democracy/src/migrations.rs +++ b/frame/democracy/src/migrations.rs @@ -83,7 +83,7 @@ pub mod v1 { "skipping on_runtime_upgrade: executed on wrong storage version.\ Expected version 0" ); - return weight; + return weight } ReferendumInfoOf::::translate( @@ -91,18 +91,16 @@ pub mod v1 { weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); log::info!(target: TARGET, "migrating referendum #{:?}", &index); Some(match old { - ReferendumInfo::Ongoing(status) => { + ReferendumInfo::Ongoing(status) => ReferendumInfo::Ongoing(ReferendumStatus { end: status.end, proposal: Bounded::from_legacy_hash(status.proposal), threshold: status.threshold, delay: status.delay, tally: status.tally, - }) - }, - ReferendumInfo::Finished { approved, end } => { - ReferendumInfo::Finished { approved, end } - }, + }), + ReferendumInfo::Finished { approved, end } => + ReferendumInfo::Finished { approved, end }, }) }, ); diff --git a/frame/democracy/src/vote.rs b/frame/democracy/src/vote.rs index 8d6c97fe7d0f8..122f54febd8cf 100644 --- a/frame/democracy/src/vote.rs +++ b/frame/democracy/src/vote.rs @@ -88,9 +88,8 @@ impl AccountVote { pub fn locked_if(self, approved: bool) -> Option<(u32, Balance)> { // winning side: can only be removed after the lock period ends. match self { - AccountVote::Standard { vote, balance } if vote.aye == approved => { - Some((vote.conviction.lock_periods(), balance)) - }, + AccountVote::Standard { vote, balance } if vote.aye == approved => + Some((vote.conviction.lock_periods(), balance)), _ => None, } } @@ -206,9 +205,8 @@ impl< /// The amount of this account's balance that much currently be locked due to voting. pub fn locked_balance(&self) -> Balance { match self { - Voting::Direct { votes, prior, .. 
} => { - votes.iter().map(|i| i.1.balance()).fold(prior.locked(), |a, i| a.max(i)) - }, + Voting::Direct { votes, prior, .. } => + votes.iter().map(|i| i.1.balance()).fold(prior.locked(), |a, i| a.max(i)), Voting::Delegating { balance, prior, .. } => *balance.max(&prior.locked()), } } diff --git a/frame/democracy/src/vote_threshold.rs b/frame/democracy/src/vote_threshold.rs index c48de77bc11fb..e8ef91def9820 100644 --- a/frame/democracy/src/vote_threshold.rs +++ b/frame/democracy/src/vote_threshold.rs @@ -60,18 +60,18 @@ fn compare_rationals< let q1 = n1 / d1; let q2 = n2 / d2; if q1 < q2 { - return true; + return true } if q2 < q1 { - return false; + return false } let r1 = n1 % d1; let r2 = n2 % d2; if r2.is_zero() { - return false; + return false } if r1.is_zero() { - return true; + return true } n1 = d2; n2 = d1; @@ -95,15 +95,13 @@ impl< let sqrt_voters = tally.turnout.integer_sqrt(); let sqrt_electorate = electorate.integer_sqrt(); if sqrt_voters.is_zero() { - return false; + return false } match *self { - VoteThreshold::SuperMajorityApprove => { - compare_rationals(tally.nays, sqrt_voters, tally.ayes, sqrt_electorate) - }, - VoteThreshold::SuperMajorityAgainst => { - compare_rationals(tally.nays, sqrt_electorate, tally.ayes, sqrt_voters) - }, + VoteThreshold::SuperMajorityApprove => + compare_rationals(tally.nays, sqrt_voters, tally.ayes, sqrt_electorate), + VoteThreshold::SuperMajorityAgainst => + compare_rationals(tally.nays, sqrt_electorate, tally.ayes, sqrt_voters), VoteThreshold::SimpleMajority => tally.ayes > tally.nays, } } diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 472330159c08b..2d49cd79dbcad 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1406,7 +1406,7 @@ impl Pallet { .map_err(ElectionError::DataProvider)?; if targets.len() > target_limit || voters.len() > voter_limit { - return 
Err(ElectionError::DataProvider("Snapshot too big for submission.")); + return Err(ElectionError::DataProvider("Snapshot too big for submission.")) } let mut desired_targets = @@ -1532,7 +1532,7 @@ impl Pallet { // Check that all of the targets are valid based on the snapshot. if assignment.distribution.iter().any(|(d, _)| !targets.contains(d)) { - return Err(FeasibilityError::InvalidVote); + return Err(FeasibilityError::InvalidVote) } Ok(()) })?; @@ -2469,8 +2469,8 @@ mod tests { let mut active = 1; while weight_with(active) - .all_lte(::BlockWeights::get().max_block) - || active == all_voters + .all_lte(::BlockWeights::get().max_block) || + active == all_voters { active += 1; } diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 933f3afedfbea..8ab7e5bbf733d 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -366,12 +366,10 @@ impl MinerConfig for Runtime { MockedWeightInfo::Basic => Weight::from_ref_time( (10 as u64).saturating_add((5 as u64).saturating_mul(a as u64)), ), - MockedWeightInfo::Complex => { - Weight::from_ref_time((0 * v + 0 * t + 1000 * a + 0 * d) as u64) - }, - MockedWeightInfo::Real => { - <() as multi_phase::weights::WeightInfo>::feasibility_check(v, t, a, d) - }, + MockedWeightInfo::Complex => + Weight::from_ref_time((0 * v + 0 * t + 1000 * a + 0 * d) as u64), + MockedWeightInfo::Real => + <() as multi_phase::weights::WeightInfo>::feasibility_check(v, t, a, d), } } } @@ -437,10 +435,10 @@ impl ElectionDataProvider for StakingMock { fn electable_targets(maybe_max_len: Option) -> data_provider::Result> { let targets = Targets::get(); - if !DataProviderAllowBadData::get() - && maybe_max_len.map_or(false, |max_len| targets.len() > max_len) + if !DataProviderAllowBadData::get() && + maybe_max_len.map_or(false, |max_len| targets.len() > max_len) { - return Err("Targets too big"); + return Err("Targets too big") } 
Ok(targets) diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index 2b92dc9263ad8..9d629ad77fd79 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -219,7 +219,7 @@ impl SignedSubmissions { insert: Option<(ElectionScore, T::BlockNumber, u32)>, ) -> Option> { if remove_pos >= self.indices.len() { - return None; + return None } // safe: index was just checked in the line above. @@ -319,7 +319,7 @@ impl SignedSubmissions { // if we haven't improved on the weakest score, don't change anything. if !submission.raw_solution.score.strict_threshold_better(weakest_score, threshold) { - return InsertResult::NotInserted; + return InsertResult::NotInserted } self.swap_out_submission( @@ -410,7 +410,7 @@ impl Pallet { weight = weight .saturating_add(T::WeightInfo::finalize_signed_phase_accept_solution()); - break; + break }, Err(_) => { log!(warn, "finalized_signed: invalid signed submission found, slashing."); diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 1549be4d80eba..7340605dfe621 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -109,9 +109,8 @@ fn save_solution(call: &Call) -> Result<(), MinerError> { let storage = StorageValueRef::persistent(OFFCHAIN_CACHED_CALL); match storage.mutate::<_, (), _>(|_| Ok(call.clone())) { Ok(_) => Ok(()), - Err(MutateStorageError::ConcurrentModification(_)) => { - Err(MinerError::FailedToStoreSolution) - }, + Err(MutateStorageError::ConcurrentModification(_)) => + Err(MinerError::FailedToStoreSolution), Err(MutateStorageError::ValueFunctionFailed(_)) => { // this branch should be unreachable according to the definition of // `StorageValueRef::mutate`: that function should only ever `Err` if the closure we @@ -302,9 +301,8 @@ impl Pallet { |maybe_head: 
Result, _>| { match maybe_head { Ok(Some(head)) if now < head => Err("fork."), - Ok(Some(head)) if now >= head && now <= head + threshold => { - Err("recently executed.") - }, + Ok(Some(head)) if now >= head && now <= head + threshold => + Err("recently executed."), Ok(Some(head)) if now > head + threshold => { // we can run again now. Write the new head. Ok(now) @@ -321,9 +319,8 @@ impl Pallet { // all good Ok(_) => Ok(()), // failed to write. - Err(MutateStorageError::ConcurrentModification(_)) => { - Err(MinerError::Lock("failed to write to offchain db (concurrent modification).")) - }, + Err(MutateStorageError::ConcurrentModification(_)) => + Err(MinerError::Lock("failed to write to offchain db (concurrent modification).")), // fork etc. Err(MutateStorageError::ValueFunctionFailed(why)) => Err(MinerError::Lock(why)), } @@ -347,8 +344,8 @@ impl Pallet { // ensure correct number of winners. ensure!( - Self::desired_targets().unwrap_or_default() - == raw_solution.solution.unique_targets().len() as u32, + Self::desired_targets().unwrap_or_default() == + raw_solution.solution.unique_targets().len() as u32, Error::::PreDispatchWrongWinnerCount, ); @@ -539,7 +536,7 @@ impl Miner { // not much we can do if assignments are already empty. if high == low { - return Ok(()); + return Ok(()) } while high - low > 1 { @@ -550,8 +547,8 @@ impl Miner { high = test; } } - let maximum_allowed_voters = if low < assignments.len() - && encoded_size_of(&assignments[..low + 1])? <= max_allowed_length + let maximum_allowed_voters = if low < assignments.len() && + encoded_size_of(&assignments[..low + 1])? 
<= max_allowed_length { low + 1 } else { @@ -563,8 +560,8 @@ impl Miner { encoded_size_of(&assignments[..maximum_allowed_voters]).unwrap() <= max_allowed_length ); debug_assert!(if maximum_allowed_voters < assignments.len() { - encoded_size_of(&assignments[..maximum_allowed_voters + 1]).unwrap() - > max_allowed_length + encoded_size_of(&assignments[..maximum_allowed_voters + 1]).unwrap() > + max_allowed_length } else { true }); @@ -629,7 +626,7 @@ impl Miner { max_weight: Weight, ) -> u32 { if size.voters < 1 { - return size.voters; + return size.voters } let max_voters = size.voters.max(1); diff --git a/frame/election-provider-support/solution-type/src/lib.rs b/frame/election-provider-support/solution-type/src/lib.rs index 1cae99affce03..0a5c11e76dedb 100644 --- a/frame/election-provider-support/solution-type/src/lib.rs +++ b/frame/election-provider-support/solution-type/src/lib.rs @@ -155,10 +155,10 @@ fn check_attributes(input: ParseStream) -> syn::Result { return Err(syn::Error::new_spanned( extra_attr, "compact solution can accept only #[compact]", - )); + )) } if attrs.is_empty() { - return Ok(false); + return Ok(false) } let attr = attrs.pop().expect("attributes vec with len 1 can be popped."); if attr.path.is_ident("compact") { @@ -183,7 +183,7 @@ impl Parse for SolutionDef { let generics: syn::AngleBracketedGenericArguments = input.parse()?; if generics.args.len() != 4 { - return Err(syn_err("Must provide 4 generic args.")); + return Err(syn_err("Must provide 4 generic args.")) } let expected_types = ["VoterIndex", "TargetIndex", "Accuracy", "MaxVoters"]; @@ -267,7 +267,7 @@ mod tests { fn ui_fail() { // Only run the ui tests when `RUN_UI_TESTS` is set. 
if std::env::var("RUN_UI_TESTS").is_err() { - return; + return } let cases = trybuild::TestCases::new(); diff --git a/frame/election-provider-support/solution-type/src/single_page.rs b/frame/election-provider-support/solution-type/src/single_page.rs index d231b2ceb10b1..a7ccf5085d2b1 100644 --- a/frame/election-provider-support/solution-type/src/single_page.rs +++ b/frame/election-provider-support/solution-type/src/single_page.rs @@ -33,7 +33,7 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { } = def; if count <= 2 { - return Err(syn_err("cannot build solution struct with capacity less than 3.")); + return Err(syn_err("cannot build solution struct with capacity less than 3.")) } let single = { diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 844ba6a89a85a..38924a18e2f54 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -387,13 +387,12 @@ pub trait ElectionProviderBase { /// [`Self::MaxWinners`]. 
fn desired_targets_checked() -> data_provider::Result { match Self::DataProvider::desired_targets() { - Ok(desired_targets) => { + Ok(desired_targets) => if desired_targets <= Self::MaxWinners::get() { Ok(desired_targets) } else { Err("desired_targets should not be greater than MaxWinners") - } - }, + }, Err(e) => Err(e), } } diff --git a/frame/election-provider-support/src/mock.rs b/frame/election-provider-support/src/mock.rs index d75fe6250509b..7c834f06f3cdf 100644 --- a/frame/election-provider-support/src/mock.rs +++ b/frame/election-provider-support/src/mock.rs @@ -118,7 +118,7 @@ pub fn generate_random_votes( // distribute the available stake randomly let stake_distribution = if num_chosen_winners == 0 { - continue; + continue } else { let mut available_stake = 1000; let mut stake_distribution = Vec::with_capacity(num_chosen_winners); diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index b5276205ae647..483c402fe249c 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -111,7 +111,7 @@ fn elect_with_input_bounds( if desired_targets > T::MaxWinners::get() { // early exit - return Err(Error::TooManyWinners); + return Err(Error::TooManyWinners) } let voters_len = voters.len() as u32; diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 9c7f7c3f0513d..165a8fcab429b 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -905,7 +905,7 @@ impl Pallet { if candidates_and_deposit.len().is_zero() { Self::deposit_event(Event::EmptyTerm); - return T::DbWeight::get().reads(3); + return T::DbWeight::get().reads(3) } // All of the new winners that come out of phragmen will thus have a deposit recorded. @@ -937,7 +937,7 @@ impl Pallet { "Failed to run election. 
Number of voters exceeded", ); Self::deposit_event(Event::ElectionError); - return T::DbWeight::get().reads(3 + max_voters as u64); + return T::DbWeight::get().reads(3 + max_voters as u64) }, } @@ -1044,8 +1044,8 @@ impl Pallet { // All candidates/members/runners-up who are no longer retaining a position as a // seat holder will lose their bond. candidates_and_deposit.iter().for_each(|(c, d)| { - if new_members_ids_sorted.binary_search(c).is_err() - && new_runners_up_ids_sorted.binary_search(c).is_err() + if new_members_ids_sorted.binary_search(c).is_err() && + new_runners_up_ids_sorted.binary_search(c).is_err() { let (imbalance, _) = T::Currency::slash_reserved(c, *d); T::LoserCandidate::on_unbalanced(imbalance); diff --git a/frame/elections-phragmen/src/migrations/v4.rs b/frame/elections-phragmen/src/migrations/v4.rs index 4a840a7ab9ce8..76ef630706c50 100644 --- a/frame/elections-phragmen/src/migrations/v4.rs +++ b/frame/elections-phragmen/src/migrations/v4.rs @@ -38,7 +38,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { target: "runtime::elections-phragmen", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return Weight::zero(); + return Weight::zero() } let storage_version = StorageVersion::get::>(); log::info!( diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index 6d30580ebac17..256529421caae 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -750,7 +750,7 @@ where ) -> TransactionValidity { // if the transaction is too big, just drop it. 
if len > 200 { - return InvalidTransaction::ExhaustsResources.into(); + return InvalidTransaction::ExhaustsResources.into() } // check for `set_dummy` diff --git a/frame/examples/offchain-worker/src/lib.rs b/frame/examples/offchain-worker/src/lib.rs index 243039a163104..fdf8b61a01acd 100644 --- a/frame/examples/offchain-worker/src/lib.rs +++ b/frame/examples/offchain-worker/src/lib.rs @@ -199,12 +199,10 @@ pub mod pallet { let should_send = Self::choose_transaction_type(block_number); let res = match should_send { TransactionType::Signed => Self::fetch_price_and_send_signed(), - TransactionType::UnsignedForAny => { - Self::fetch_price_and_send_unsigned_for_any_account(block_number) - }, - TransactionType::UnsignedForAll => { - Self::fetch_price_and_send_unsigned_for_all_accounts(block_number) - }, + TransactionType::UnsignedForAny => + Self::fetch_price_and_send_unsigned_for_any_account(block_number), + TransactionType::UnsignedForAll => + Self::fetch_price_and_send_unsigned_for_all_accounts(block_number), TransactionType::Raw => Self::fetch_price_and_send_raw_unsigned(block_number), TransactionType::None => Ok(()), }; @@ -316,7 +314,7 @@ pub mod pallet { let signature_valid = SignedPayload::::verify::(payload, signature.clone()); if !signature_valid { - return InvalidTransaction::BadProof.into(); + return InvalidTransaction::BadProof.into() } Self::validate_transaction_parameters(&payload.block_number, &payload.price) } else if let Call::submit_price_unsigned { block_number, price: new_price } = call { @@ -393,9 +391,8 @@ impl Pallet { match last_send { // If we already have a value in storage and the block number is recent enough // we avoid sending another transaction at this time. - Ok(Some(block)) if block_number < block + T::GracePeriod::get() => { - Err(RECENTLY_SENT) - }, + Ok(Some(block)) if block_number < block + T::GracePeriod::get() => + Err(RECENTLY_SENT), // In every other case we attempt to acquire the lock and send a transaction. 
_ => Ok(block_number), } @@ -446,7 +443,7 @@ impl Pallet { if !signer.can_sign() { return Err( "No local accounts available. Consider adding one via `author_insertKey` RPC.", - ); + ) } // Make an external HTTP request to fetch the current price. // Note this call will block until response is received. @@ -479,7 +476,7 @@ impl Pallet { // anyway. let next_unsigned_at = >::get(); if next_unsigned_at > block_number { - return Err("Too early to send unsigned transaction"); + return Err("Too early to send unsigned transaction") } // Make an external HTTP request to fetch the current price. @@ -513,7 +510,7 @@ impl Pallet { // anyway. let next_unsigned_at = >::get(); if next_unsigned_at > block_number { - return Err("Too early to send unsigned transaction"); + return Err("Too early to send unsigned transaction") } // Make an external HTTP request to fetch the current price. @@ -543,7 +540,7 @@ impl Pallet { // anyway. let next_unsigned_at = >::get(); if next_unsigned_at > block_number { - return Err("Too early to send unsigned transaction"); + return Err("Too early to send unsigned transaction") } // Make an external HTTP request to fetch the current price. @@ -561,7 +558,7 @@ impl Pallet { ); for (_account_id, result) in transaction_results.into_iter() { if result.is_err() { - return Err("Unable to submit transaction"); + return Err("Unable to submit transaction") } } @@ -597,7 +594,7 @@ impl Pallet { // Let's check the status code before we proceed to reading the response. if response.code != 200 { log::warn!("Unexpected status code: {}", response.code); - return Err(http::Error::Unknown); + return Err(http::Error::Unknown) } // Next we want to fully read the response body and collect it to a vector of bytes. @@ -677,12 +674,12 @@ impl Pallet { // Now let's check if the transaction has any chance to succeed. 
let next_unsigned_at = >::get(); if &next_unsigned_at > block_number { - return InvalidTransaction::Stale.into(); + return InvalidTransaction::Stale.into() } // Let's make sure to reject transactions from the future. let current_block = >::block_number(); if &current_block < block_number { - return InvalidTransaction::Future.into(); + return InvalidTransaction::Future.into() } // We prioritize transactions that are more far away from current average. diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index ace82366684da..b7884efccf685 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -386,9 +386,9 @@ where // Check that `parent_hash` is correct. let n = *header.number(); assert!( - n > System::BlockNumber::zero() - && >::block_hash(n - System::BlockNumber::one()) - == *header.parent_hash(), + n > System::BlockNumber::zero() && + >::block_hash(n - System::BlockNumber::one()) == + *header.parent_hash(), "Parent hash should be valid.", ); @@ -895,8 +895,8 @@ mod tests { .assimilate_storage(&mut t) .unwrap(); let xt = TestXt::new(call_transfer(2, 69), sign_extra(1, 0, 0)); - let weight = xt.get_dispatch_info().weight - + ::BlockWeights::get() + let weight = xt.get_dispatch_info().weight + + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; let fee: Balance = @@ -1092,8 +1092,8 @@ mod tests { let mut t = new_test_ext(1); t.execute_with(|| { // Block execution weight + on_initialize weight from custom module - let base_block_weight = Weight::from_ref_time(175) - + ::BlockWeights::get().base_block; + let base_block_weight = Weight::from_ref_time(175) + + ::BlockWeights::get().base_block; Executive::initialize_block(&Header::new( 1, @@ -1111,8 +1111,8 @@ mod tests { assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); // default weight for `TestXt` == encoded length. 
- let extrinsic_weight = Weight::from_ref_time(len as u64) - + ::BlockWeights::get() + let extrinsic_weight = Weight::from_ref_time(len as u64) + + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; assert_eq!( @@ -1183,8 +1183,8 @@ mod tests { RuntimeCall::System(SystemCall::remark { remark: vec![1u8] }), sign_extra(1, 0, 0), ); - let weight = xt.get_dispatch_info().weight - + ::BlockWeights::get() + let weight = xt.get_dispatch_info().weight + + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; let fee: Balance = @@ -1428,10 +1428,9 @@ mod tests { // Weights are recorded correctly assert_eq!( frame_system::Pallet::::block_weight().total(), - custom_runtime_upgrade_weight - + runtime_upgrade_weight - + on_initialize_weight - + base_block_weight, + custom_runtime_upgrade_weight + + runtime_upgrade_weight + + on_initialize_weight + base_block_weight, ); }); } diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 7d4aad8b4d94a..618afa63c2c4c 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -201,7 +201,7 @@ pub mod pallet { impl Hooks for Pallet { fn on_idle(_: T::BlockNumber, remaining_weight: Weight) -> Weight { if remaining_weight.any_lt(T::DbWeight::get().reads(2)) { - return Weight::from_ref_time(0); + return Weight::from_ref_time(0) } Self::do_on_idle(remaining_weight) @@ -321,7 +321,7 @@ pub mod pallet { pub(crate) fn do_on_idle(remaining_weight: Weight) -> Weight { let mut eras_to_check_per_block = ErasToCheckPerBlock::::get(); if eras_to_check_per_block.is_zero() { - return T::DbWeight::get().reads(1); + return T::DbWeight::get().reads(1) } // NOTE: here we're assuming that the number of validators has only ever increased, @@ -338,7 +338,7 @@ pub mod pallet { eras_to_check_per_block.saturating_dec(); if eras_to_check_per_block.is_zero() { log!(debug, "early existing because eras_to_check_per_block is zero"); - return T::DbWeight::get().reads(2); + return 
T::DbWeight::get().reads(2) } } @@ -347,7 +347,7 @@ pub mod pallet { // there is an ongoing election -- we better not do anything. Imagine someone is not // exposed anywhere in the last era, and the snapshot for the election is already // taken. In this time period, we don't want to accidentally unstake them. - return T::DbWeight::get().reads(2); + return T::DbWeight::get().reads(2) } let UnstakeRequest { stashes, mut checked } = match Head::::take().or_else(|| { @@ -365,7 +365,7 @@ pub mod pallet { }) { None => { // There's no `Head` and nothing in the `Queue`, nothing to do here. - return T::DbWeight::get().reads(4); + return T::DbWeight::get().reads(4) }, Some(head) => head, }; @@ -389,8 +389,8 @@ pub mod pallet { let unchecked_eras_to_check = { // get the last available `bonding_duration` eras up to current era in reverse // order. - let total_check_range = (current_era.saturating_sub(bonding_duration) - ..=current_era) + let total_check_range = (current_era.saturating_sub(bonding_duration)..= + current_era) .rev() .collect::>(); debug_assert!( @@ -471,7 +471,7 @@ pub mod pallet { ); match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { - Ok(_) => { + Ok(_) => if stashes.is_empty() { Self::deposit_event(Event::::BatchFinished); } else { @@ -479,8 +479,7 @@ pub mod pallet { Self::deposit_event(Event::::BatchChecked { eras: unchecked_eras_to_check, }); - } - }, + }, Err(_) => { // don't put the head back in -- there is an internal error in the pallet. Self::halt("checked is pruned via retain above") diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index 17d48c5a0fd5e..d66f4ba5663d9 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -251,8 +251,8 @@ impl ExtBuilder { (VALIDATOR_PREFIX..VALIDATOR_PREFIX + VALIDATORS_PER_ERA) .map(|v| { // for the sake of sanity, let's register this taker as an actual validator. 
- let others = (NOMINATOR_PREFIX - ..(NOMINATOR_PREFIX + NOMINATORS_PER_VALIDATOR_PER_ERA)) + let others = (NOMINATOR_PREFIX.. + (NOMINATOR_PREFIX + NOMINATORS_PER_VALIDATOR_PER_ERA)) .map(|n| IndividualExposure { who: n, value: 0 as Balance }) .collect::>(); (v, Exposure { total: 0, own: 0, others }) diff --git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs index 5bed1eb12dbe6..28a0f5fd56e67 100644 --- a/frame/gilt/src/lib.rs +++ b/frame/gilt/src/lib.rs @@ -598,7 +598,7 @@ pub mod pallet { QueueTotals::::mutate(|qs| { for duration in (1..=T::QueueCount::get()).rev() { if qs[duration as usize - 1].0 == 0 { - continue; + continue } let queue_index = duration as usize - 1; let expiry = @@ -644,14 +644,14 @@ pub mod pallet { bids_taken += 1; if remaining.is_zero() || bids_taken == max_bids { - break; + break } } queues_hit += 1; qs[queue_index].0 = q.len() as u32; }); if remaining.is_zero() || bids_taken == max_bids { - break; + break } } }); diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index 37b5d57faa7e1..181d22fba545c 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -215,7 +215,7 @@ impl Pallet { "rejecting unsigned report equivocation transaction because it is not local/in-block." ); - return InvalidTransaction::Call.into(); + return InvalidTransaction::Call.into() }, } diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 23f81193c8d6e..fe5b9861853bf 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -465,7 +465,7 @@ impl Pallet { if forced.is_some() { if Self::next_forced().map_or(false, |next| next > scheduled_at) { - return Err(Error::::TooSoon.into()); + return Err(Error::::TooSoon.into()) } // only allow the next forced change when twice the window has passed since @@ -538,7 +538,7 @@ impl Pallet { // validate equivocation proof (check votes are different and // signatures are valid). 
if !sp_finality_grandpa::check_equivocation_proof(equivocation_proof) { - return Err(Error::::InvalidEquivocationProof.into()); + return Err(Error::::InvalidEquivocationProof.into()) } // fetch the current and previous sets last session index. on the @@ -557,12 +557,12 @@ impl Pallet { // check that the session id for the membership proof is within the // bounds of the set id reported in the equivocation. - if session_index > set_id_session_index - || previous_set_id_session_index + if session_index > set_id_session_index || + previous_set_id_session_index .map(|previous_index| session_index <= previous_index) .unwrap_or(false) { - return Err(Error::::InvalidEquivocationProof.into()); + return Err(Error::::InvalidEquivocationProof.into()) } // report to the offences module rewarding the sender. diff --git a/frame/grandpa/src/migrations/v4.rs b/frame/grandpa/src/migrations/v4.rs index 3746953e016a3..81dbd3bab4b67 100644 --- a/frame/grandpa/src/migrations/v4.rs +++ b/frame/grandpa/src/migrations/v4.rs @@ -37,7 +37,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { target: "runtime::afg", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return Weight::zero(); + return Weight::zero() } let storage_version = StorageVersion::get::>(); log::info!( diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 2de1d46d606d4..626decd12821e 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -379,7 +379,7 @@ fn report_equivocation_current_set_works() { // check that the balances of all other validators are left intact. for validator in &validators { if *validator == equivocation_validator_id { - continue; + continue } assert_eq!(Balances::total_balance(validator), 10_000_000); @@ -458,7 +458,7 @@ fn report_equivocation_old_set_works() { // check that the balances of all other validators are left intact. 
for validator in &validators { if *validator == equivocation_validator_id { - continue; + continue } assert_eq!(Balances::total_balance(validator), 10_000_000); diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 4629b950dd786..3584eb954b399 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -45,16 +45,14 @@ fn add_registrars(r: u32) -> Result<(), &'static str> { let registrar_origin = T::RegistrarOrigin::successful_origin(); Identity::::add_registrar(registrar_origin, registrar_lookup)?; Identity::::set_fee(RawOrigin::Signed(registrar.clone()).into(), i, 10u32.into())?; - let fields = IdentityFields( - IdentityField::Display - | IdentityField::Legal - | IdentityField::Web - | IdentityField::Riot - | IdentityField::Email - | IdentityField::PgpFingerprint - | IdentityField::Image - | IdentityField::Twitter, - ); + let fields = + IdentityFields( + IdentityField::Display | + IdentityField::Legal | IdentityField::Web | + IdentityField::Riot | IdentityField::Email | + IdentityField::PgpFingerprint | + IdentityField::Image | IdentityField::Twitter, + ); Identity::::set_fields(RawOrigin::Signed(registrar.clone()).into(), i, fields)?; } diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 01cbcca63abe9..95f5a84d8abb7 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -546,16 +546,14 @@ pub mod pallet { let item = (reg_index, Judgement::FeePaid(registrar.fee)); match id.judgements.binary_search_by_key(®_index, |x| x.0) { - Ok(i) => { + Ok(i) => if id.judgements[i].1.is_sticky() { - return Err(Error::::StickyJudgement.into()); + return Err(Error::::StickyJudgement.into()) } else { id.judgements[i] = item - } - }, - Err(i) => { - id.judgements.try_insert(i, item).map_err(|_| Error::::TooManyRegistrars)? 
- }, + }, + Err(i) => + id.judgements.try_insert(i, item).map_err(|_| Error::::TooManyRegistrars)?, } T::Currency::reserve(&sender, registrar.fee)?; @@ -608,7 +606,7 @@ pub mod pallet { let fee = if let Judgement::FeePaid(fee) = id.judgements.remove(pos).1 { fee } else { - return Err(Error::::JudgementGiven.into()); + return Err(Error::::JudgementGiven.into()) }; let err_amount = T::Currency::unreserve(&sender, fee); @@ -785,7 +783,7 @@ pub mod pallet { let mut id = >::get(&target).ok_or(Error::::InvalidTarget)?; if T::Hashing::hash_of(&id.info) != identity { - return Err(Error::::JudgementForDifferentIdentity.into()); + return Err(Error::::JudgementForDifferentIdentity.into()) } let item = (reg_index, judgement); diff --git a/frame/identity/src/types.rs b/frame/identity/src/types.rs index 419fd05fd0ea2..b1f15da3b1117 100644 --- a/frame/identity/src/types.rs +++ b/frame/identity/src/types.rs @@ -396,9 +396,8 @@ impl< > Registration { pub(crate) fn total_deposit(&self) -> Balance { - self.deposit - + self - .judgements + self.deposit + + self.judgements .iter() .map(|(_, ref j)| if let Judgement::FeePaid(fee) = j { *fee } else { Zero::zero() }) .fold(Zero::zero(), |a, i| a + i) diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 80ced4e4ec015..342522ff29b19 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -552,19 +552,19 @@ pub mod pallet { if let Call::heartbeat { heartbeat, signature } = call { if >::is_online(heartbeat.authority_index) { // we already received a heartbeat for this authority - return InvalidTransaction::Stale.into(); + return InvalidTransaction::Stale.into() } // check if session index from heartbeat is recent let current_session = T::ValidatorSet::session_index(); if heartbeat.session_index != current_session { - return InvalidTransaction::Stale.into(); + return InvalidTransaction::Stale.into() } // verify that the incoming (unverified) pubkey is actually an authority id let keys = Keys::::get(); 
if keys.len() as u32 != heartbeat.validators_len { - return InvalidTransaction::Custom(INVALID_VALIDATORS_LEN).into(); + return InvalidTransaction::Custom(INVALID_VALIDATORS_LEN).into() } let authority_id = match keys.get(heartbeat.authority_index as usize) { Some(id) => id, @@ -577,7 +577,7 @@ pub mod pallet { }); if !signature_valid { - return InvalidTransaction::BadProof.into(); + return InvalidTransaction::BadProof.into() } ValidTransaction::with_tag_prefix("ImOnline") @@ -621,7 +621,7 @@ impl Pallet { let current_validators = T::ValidatorSet::validators(); if authority_index >= current_validators.len() as u32 { - return false; + return false } let authority = ¤t_validators[authority_index as usize]; @@ -632,8 +632,8 @@ impl Pallet { fn is_online_aux(authority_index: AuthIndex, authority: &ValidatorId) -> bool { let current_session = T::ValidatorSet::session_index(); - ReceivedHeartbeats::::contains_key(¤t_session, &authority_index) - || AuthoredBlocks::::get(¤t_session, authority) != 0 + ReceivedHeartbeats::::contains_key(¤t_session, &authority_index) || + AuthoredBlocks::::get(¤t_session, authority) != 0 } /// Returns `true` if a heartbeat has been received for the authority at `authority_index` in @@ -683,8 +683,8 @@ impl Pallet { // haven't sent an heartbeat yet we'll send one unconditionally. the idea is to prevent // all nodes from sending the heartbeats at the same block and causing a temporary (but // deterministic) spike in transactions. 
- progress >= START_HEARTBEAT_FINAL_PERIOD - || progress >= START_HEARTBEAT_RANDOM_PERIOD && random_choice(progress) + progress >= START_HEARTBEAT_FINAL_PERIOD || + progress >= START_HEARTBEAT_RANDOM_PERIOD && random_choice(progress) } else { // otherwise we fallback to using the block number calculated at the beginning // of the session that should roughly correspond to the middle of the session @@ -693,7 +693,7 @@ impl Pallet { }; if !should_heartbeat { - return Err(OffchainErr::TooEarly); + return Err(OffchainErr::TooEarly) } let session_index = T::ValidatorSet::session_index(); @@ -735,7 +735,7 @@ impl Pallet { }; if Self::is_online(authority_index) { - return Err(OffchainErr::AlreadyOnline(authority_index)); + return Err(OffchainErr::AlreadyOnline(authority_index)) } // acquire lock for that authority at current heartbeat to make sure we don't @@ -801,16 +801,15 @@ impl Pallet { // we will re-send it. match status { // we are still waiting for inclusion. - Ok(Some(status)) if status.is_recent(session_index, now) => { - Err(OffchainErr::WaitingForInclusion(status.sent_at)) - }, + Ok(Some(status)) if status.is_recent(session_index, now) => + Err(OffchainErr::WaitingForInclusion(status.sent_at)), // attempt to set new status _ => Ok(HeartbeatStatus { session_index, sent_at: now }), } }, ); if let Err(MutateStorageError::ValueFunctionFailed(err)) = res { - return Err(err); + return Err(err) } let mut new_status = res.map_err(|_| OffchainErr::FailedToAcquireLock)?; diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index a4db3f104a824..366119278d836 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -137,9 +137,8 @@ fn heartbeat( signature: signature.clone(), }) .map_err(|e| match e { - TransactionValidityError::Invalid(InvalidTransaction::Custom(INVALID_VALIDATORS_LEN)) => { - "invalid validators len" - }, + TransactionValidityError::Invalid(InvalidTransaction::Custom(INVALID_VALIDATORS_LEN)) => + "invalid 
validators len", e @ _ => <&'static str>::from(e), })?; ImOnline::heartbeat(RuntimeOrigin::none(), heartbeat, signature) @@ -241,9 +240,8 @@ fn should_generate_heartbeats() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); let heartbeat = match ex.call { - crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => { - heartbeat - }, + crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => + heartbeat, e => panic!("Unexpected call: {:?}", e), }; @@ -358,9 +356,8 @@ fn should_not_send_a_report_if_already_online() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); let heartbeat = match ex.call { - crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => { - heartbeat - }, + crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => + heartbeat, e => panic!("Unexpected call: {:?}", e), }; diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs index 5371730cc11f1..c501a30ef5f4a 100644 --- a/frame/lottery/src/lib.rs +++ b/frame/lottery/src/lib.rs @@ -267,11 +267,11 @@ pub mod pallet { LotteryIndex::::mutate(|index| *index = index.saturating_add(1)); // Set a new start with the current block. config.start = n; - return T::WeightInfo::on_initialize_repeat(); + return T::WeightInfo::on_initialize_repeat() } else { // Else, kill the lottery storage. *lottery = None; - return T::WeightInfo::on_initialize_end(); + return T::WeightInfo::on_initialize_end() } // We choose not need to kill Participants and Tickets to avoid a large // number of writes at one time. 
Instead, data persists between lotteries, @@ -424,7 +424,7 @@ impl Pallet { fn call_to_index(call: &::RuntimeCall) -> Result { let encoded_call = call.encode(); if encoded_call.len() < 2 { - return Err(Error::::EncodingFailed.into()); + return Err(Error::::EncodingFailed.into()) } Ok((encoded_call[0], encoded_call[1])) } @@ -487,14 +487,14 @@ impl Pallet { /// Returns `None` if there are no tickets. fn choose_ticket(total: u32) -> Option { if total == 0 { - return None; + return None } let mut random_number = Self::generate_random_number(0); // Best effort attempt to remove bias from modulus operator. for i in 1..T::MaxGenerateRandom::get() { if random_number < u32::MAX - u32::MAX % total { - break; + break } random_number = Self::generate_random_number(i); diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 1971291dec819..4191bbcc5d86e 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -222,7 +222,7 @@ pub mod pallet { let add = T::Lookup::lookup(add)?; if remove == add { - return Ok(()); + return Ok(()) } let mut members = >::get(); diff --git a/frame/membership/src/migrations/v4.rs b/frame/membership/src/migrations/v4.rs index b72d8e162a2d4..5b8735aa2bac9 100644 --- a/frame/membership/src/migrations/v4.rs +++ b/frame/membership/src/migrations/v4.rs @@ -46,7 +46,7 @@ pub fn migrate::on_chain_storage_version(); @@ -85,7 +85,7 @@ pub fn pre_migrate>(old_pallet_name: N, new_ log_migration("pre-migration", old_pallet_name, new_pallet_name); if new_pallet_name == old_pallet_name { - return; + return } let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); @@ -113,7 +113,7 @@ pub fn post_migrate>(old_pallet_name: N, new log_migration("post-migration", old_pallet_name, new_pallet_name); if new_pallet_name == old_pallet_name { - return; + return } // Assert that nothing remains at the old prefix. 
diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index 0fbc807d3d01d..8476d82f3e70d 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -233,8 +233,8 @@ where /// Converts an mmr-specific error into a [`CallError`]. fn mmr_error_into_rpc_error(err: MmrError) -> CallError { - let error_code = MMR_ERROR - + match err { + let error_code = MMR_ERROR + + match err { MmrError::LeafNotFound => 1, MmrError::GenerateProof => 2, MmrError::Verify => 3, diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 207bfdf7f2dc6..a2d42417ae5dc 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -227,14 +227,14 @@ pub mod pallet { // MMR push never fails, but better safe than sorry. if mmr.push(data).is_none() { log::error!(target: "runtime::mmr", "MMR push failed"); - return T::WeightInfo::on_initialize(peaks_before); + return T::WeightInfo::on_initialize(peaks_before) } // Update the size, `mmr.finalize()` should also never fail. 
let (leaves, root) = match mmr.finalize() { Ok((leaves, root)) => (leaves, root), Err(e) => { log::error!(target: "runtime::mmr", "MMR finalize failed: {:?}", e); - return T::WeightInfo::on_initialize(peaks_before); + return T::WeightInfo::on_initialize(peaks_before) }, }; >::on_new_root(&root); @@ -428,12 +428,12 @@ impl, I: 'static> Pallet { leaves: Vec>, proof: primitives::Proof<>::Hash>, ) -> Result<(), primitives::Error> { - if proof.leaf_count > Self::mmr_leaves() - || proof.leaf_count == 0 - || (proof.items.len().saturating_add(leaves.len())) as u64 > proof.leaf_count + if proof.leaf_count > Self::mmr_leaves() || + proof.leaf_count == 0 || + (proof.items.len().saturating_add(leaves.len())) as u64 > proof.leaf_count { return Err(primitives::Error::Verify - .log_debug("The proof has incorrect number of leaves or proof items.")); + .log_debug("The proof has incorrect number of leaves or proof items.")) } let mmr: ModuleMmr = mmr::Mmr::new(proof.leaf_count); diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs index 7d45ffb9f3eb4..1f5a5bdae380b 100644 --- a/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -42,7 +42,7 @@ where let size = NodesUtils::new(proof.leaf_count).size(); if leaves.len() != proof.leaf_indices.len() { - return Err(Error::Verify.log_debug("Proof leaf_indices not same length with leaves")); + return Err(Error::Verify.log_debug("Proof leaf_indices not same length with leaves")) } let leaves_and_position_data = proof @@ -103,7 +103,7 @@ where ); if leaves.len() != proof.leaf_indices.len() { - return Err(Error::Verify.log_debug("Proof leaf_indices not same length with leaves")); + return Err(Error::Verify.log_debug("Proof leaf_indices not same length with leaves")) } let leaves_positions_and_data = proof diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs index 297337aaf2d37..d16ca8cf1e5c8 100644 
--- a/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -230,7 +230,7 @@ where // change and maybe we mess up storage migration, // return _if and only if_ node is found (in normal conditions it's always found), if let Some(elem) = sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) { - return Ok(codec::Decode::decode(&mut &*elem).ok()); + return Ok(codec::Decode::decode(&mut &*elem).ok()) } // BUT if we DID MESS UP, fall through to searching node using fork-specific key. } @@ -274,7 +274,7 @@ where fn append(&mut self, pos: NodeIndex, elems: Vec>) -> mmr_lib::Result<()> { if elems.is_empty() { - return Ok(()); + return Ok(()) } trace!( @@ -286,7 +286,7 @@ where let size = NodesUtils::new(leaves).size(); if pos != size { - return Err(mmr_lib::Error::InconsistentStore); + return Err(mmr_lib::Error::InconsistentStore) } let new_size = size + elems.len() as NodeIndex; diff --git a/frame/merkle-mountain-range/src/mmr/utils.rs b/frame/merkle-mountain-range/src/mmr/utils.rs index 6cd9b9c273ec7..0b8e88a9283da 100644 --- a/frame/merkle-mountain-range/src/mmr/utils.rs +++ b/frame/merkle-mountain-range/src/mmr/utils.rs @@ -55,7 +55,7 @@ impl NodesUtils { // Translate a _leaf_ `NodeIndex` to its `LeafIndex`. 
fn leaf_node_index_to_leaf_index(pos: NodeIndex) -> LeafIndex { if pos == 0 { - return 0; + return 0 } let peaks = helper::get_peaks(pos); (pos + peaks.len() as u64) >> 1 @@ -73,7 +73,7 @@ impl NodesUtils { pub fn right_branch_ending_in_leaf(leaf_index: LeafIndex) -> crate::Vec { let pos = helper::leaf_index_to_pos(leaf_index); let num_parents = leaf_index.trailing_ones() as u64; - return (pos..=pos + num_parents).collect(); + return (pos..=pos + num_parents).collect() } } diff --git a/frame/multisig/src/migrations.rs b/frame/multisig/src/migrations.rs index 12087e6f15e8c..5085297cde433 100644 --- a/frame/multisig/src/migrations.rs +++ b/frame/multisig/src/migrations.rs @@ -59,7 +59,7 @@ pub mod v1 { if onchain > 0 { log!(info, "MigrateToV1 should be removed"); - return T::DbWeight::get().reads(1); + return T::DbWeight::get().reads(1) } Calls::::drain().for_each(|(_call_hash, (_data, caller, deposit))| { diff --git a/frame/nft-fractionalisation/src/lib.rs b/frame/nft-fractionalisation/src/lib.rs index c9dd822e7a5d6..ad5fdb255522f 100644 --- a/frame/nft-fractionalisation/src/lib.rs +++ b/frame/nft-fractionalisation/src/lib.rs @@ -154,20 +154,15 @@ pub mod pallet { Self::do_lock_nft(collection_id, item_id)?; Self::do_create_asset(asset_id, admin_account_id, min_balance)?; - Self::do_mint_asset( - asset_id, - &beneficiary, - amount, - )?; + Self::do_mint_asset(asset_id, &beneficiary, amount)?; // Mutate this storage item to retain information about the amount minted. >::try_mutate( asset_id, |assets_minted| -> Result<(), DispatchError> { match assets_minted.is_some() { - true => { - *assets_minted = Some(assets_minted.unwrap().saturating_add(amount)) - }, + true => + *assets_minted = Some(assets_minted.unwrap().saturating_add(amount)), false => *assets_minted = Some(amount), } @@ -187,7 +182,8 @@ pub mod pallet { Ok(()) } - /// Return and burn a % of the asset, unlock the NFT. Currently 100% is the minimum threshold. 
+ /// Return and burn a % of the asset, unlock the NFT. Currently 100% is the minimum + /// threshold. #[pallet::weight(10_000 + T::DbWeight::get().writes(2).ref_time())] pub fn burn_asset_unlock_nft( origin: OriginFor, @@ -240,7 +236,7 @@ pub mod pallet { >::take(asset_id); return Ok(()) }, - Err(e) => return Err(e), + Err(e) => return Err(e), } } diff --git a/frame/nfts/src/features/metadata.rs b/frame/nfts/src/features/metadata.rs index eaa13f1529a04..3a12dbe64f2f4 100644 --- a/frame/nfts/src/features/metadata.rs +++ b/frame/nfts/src/features/metadata.rs @@ -30,8 +30,8 @@ impl, I: 'static> Pallet { let item_config = Self::get_item_config(&collection, &item)?; ensure!( - maybe_check_owner.is_none() - || item_config.is_setting_enabled(ItemSetting::UnlockedMetadata), + maybe_check_owner.is_none() || + item_config.is_setting_enabled(ItemSetting::UnlockedMetadata), Error::::LockedItemMetadata ); @@ -48,8 +48,8 @@ impl, I: 'static> Pallet { let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); collection_details.owner_deposit.saturating_reduce(old_deposit); let mut deposit = Zero::zero(); - if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) - && maybe_check_owner.is_some() + if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) && + maybe_check_owner.is_some() { deposit = T::DepositPerByte::get() .saturating_mul(((data.len()) as u32).into()) @@ -108,8 +108,8 @@ impl, I: 'static> Pallet { ) -> DispatchResult { let collection_config = Self::get_collection_config(&collection)?; ensure!( - maybe_check_owner.is_none() - || collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), + maybe_check_owner.is_none() || + collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), Error::::LockedCollectionMetadata ); @@ -123,8 +123,8 @@ impl, I: 'static> Pallet { let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); details.owner_deposit.saturating_reduce(old_deposit); 
let mut deposit = Zero::zero(); - if maybe_check_owner.is_some() - && collection_config.is_setting_enabled(CollectionSetting::DepositRequired) + if maybe_check_owner.is_some() && + collection_config.is_setting_enabled(CollectionSetting::DepositRequired) { deposit = T::DepositPerByte::get() .saturating_mul(((data.len()) as u32).into()) @@ -158,8 +158,8 @@ impl, I: 'static> Pallet { let collection_config = Self::get_collection_config(&collection)?; ensure!( - maybe_check_owner.is_none() - || collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), + maybe_check_owner.is_none() || + collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), Error::::LockedCollectionMetadata ); diff --git a/frame/nfts/src/features/settings.rs b/frame/nfts/src/features/settings.rs index 2bf782eac5fe4..5f408ed183c35 100644 --- a/frame/nfts/src/features/settings.rs +++ b/frame/nfts/src/features/settings.rs @@ -98,6 +98,6 @@ impl, I: 'static> Pallet { pub(crate) fn is_pallet_feature_enabled(feature: PalletFeature) -> bool { let features = T::Features::get(); - return features.is_enabled(feature); + return features.is_enabled(feature) } } diff --git a/frame/nfts/src/features/transfer.rs b/frame/nfts/src/features/transfer.rs index f681e568a7683..7d6ae3553a361 100644 --- a/frame/nfts/src/features/transfer.rs +++ b/frame/nfts/src/features/transfer.rs @@ -93,7 +93,7 @@ impl, I: 'static> Pallet { let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; ensure!(origin == details.owner, Error::::NoPermission); if details.owner == owner { - return Ok(()); + return Ok(()) } // Move the deposit to the new owner. @@ -144,7 +144,7 @@ impl, I: 'static> Pallet { Collection::::try_mutate(collection, |maybe_details| { let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; if details.owner == owner { - return Ok(()); + return Ok(()) } // Move the deposit to the new owner. 
diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index d6f1af57328b2..574d256a7705b 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -92,11 +92,9 @@ impl, I: 'static> Inspect<::AccountId> for Palle ItemConfigOf::::get(collection, item), ) { (Some(cc), Some(ic)) - if cc.is_setting_enabled(CollectionSetting::TransferableItems) - && ic.is_setting_enabled(ItemSetting::Transferable) => - { - true - }, + if cc.is_setting_enabled(CollectionSetting::TransferableItems) && + ic.is_setting_enabled(ItemSetting::Transferable) => + true, _ => false, } } @@ -174,7 +172,7 @@ impl, I: 'static> Mutate<::AccountId, ItemConfig Self::do_burn(*collection, *item, |d| { if let Some(check_owner) = maybe_check_owner { if &d.owner != check_owner { - return Err(Error::::NoPermission.into()); + return Err(Error::::NoPermission.into()) } } Ok(()) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index df29776622778..0182c54ea41f3 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -968,10 +968,10 @@ pub mod pallet { if T::Currency::reserve(&details.deposit.account, deposit - old).is_err() { // NOTE: No alterations made to collection_details in this iteration so far, // so this is OK to do. 
- continue; + continue } } else { - continue; + continue } details.deposit.amount = deposit; Item::::insert(&collection, &item, &details); diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 14bf3f6b94c2d..e1b875702ff87 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -2208,9 +2208,9 @@ fn collection_locking_should_work() { let stored_config = CollectionConfigOf::::get(collection_id).unwrap(); let full_lock_config = collection_config_from_disabled_settings( - CollectionSetting::TransferableItems - | CollectionSetting::UnlockedMetadata - | CollectionSetting::UnlockedAttributes, + CollectionSetting::TransferableItems | + CollectionSetting::UnlockedMetadata | + CollectionSetting::UnlockedAttributes, ); assert_eq!(stored_config, full_lock_config); }); diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 222da86ed2d4c..bd1b14d10b013 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -275,7 +275,7 @@ pub mod pallet { ensure!(add.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); if remove == add { - return Ok(()); + return Ok(()) } let mut nodes = WellKnownNodes::::get(); @@ -394,7 +394,7 @@ pub mod pallet { for add_node in connections.iter() { if *add_node == node { - continue; + continue } nodes.insert(add_node.clone()); } diff --git a/frame/nomination-pools/fuzzer/src/call.rs b/frame/nomination-pools/fuzzer/src/call.rs index 95a3ad37f34e5..b07903609e8ab 100644 --- a/frame/nomination-pools/fuzzer/src/call.rs +++ b/frame/nomination-pools/fuzzer/src/call.rs @@ -173,7 +173,7 @@ impl RewardAgent { fn join(&mut self) { if self.pool_id.is_some() { - return; + return } let pool_id = LastPoolId::::get(); let amount = 10 * ExistentialDeposit::get(); @@ -189,7 +189,7 @@ impl RewardAgent { // calculated. 
if !PoolMembers::::contains_key(&self.who) { log!(warn, "reward agent is not in the pool yet, cannot claim"); - return; + return } let pre = Balances::free_balance(&42); let origin = RuntimeOrigin::signed(42); @@ -266,8 +266,8 @@ fn main() { } // execute sanity checks at a fixed interval, possibly on every block. - if iteration - % (std::env::var("SANITY_CHECK_INTERVAL") + if iteration % + (std::env::var("SANITY_CHECK_INTERVAL") .ok() .and_then(|x| x.parse::().ok())) .unwrap_or(1) == 0 diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index bce737b655296..9ca9539b3dca8 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -502,10 +502,9 @@ impl PoolMember { ) -> Result<(), Error> { if let Some(new_points) = self.points.checked_sub(&points_dissolved) { match self.unbonding_eras.get_mut(&unbonding_era) { - Some(already_unbonding_points) => { + Some(already_unbonding_points) => *already_unbonding_points = - already_unbonding_points.saturating_add(points_issued) - }, + already_unbonding_points.saturating_add(points_issued), None => self .unbonding_eras .try_insert(unbonding_era, points_issued) @@ -747,8 +746,8 @@ impl BondedPool { } fn can_nominate(&self, who: &T::AccountId) -> bool { - self.is_root(who) - || self.roles.nominator.as_ref().map_or(false, |nominator| nominator == who) + self.is_root(who) || + self.roles.nominator.as_ref().map_or(false, |nominator| nominator == who) } fn can_kick(&self, who: &T::AccountId) -> bool { @@ -841,9 +840,9 @@ impl BondedPool { // any unbond must comply with the balance condition: ensure!( - is_full_unbond - || balance_after_unbond - >= if is_depositor { + is_full_unbond || + balance_after_unbond >= + if is_depositor { Pallet::::depositor_min_bond() } else { MinJoinBond::::get() @@ -875,7 +874,7 @@ impl BondedPool { }, (false, true) => { // the depositor can simply not be unbonded permissionlessly, period. 
- return Err(Error::::DoesNotHavePermission.into()); + return Err(Error::::DoesNotHavePermission.into()) }, }; @@ -1579,12 +1578,10 @@ pub mod pallet { Self::do_reward_payout(&who, &mut member, &mut bonded_pool, &mut reward_pool)?; let (points_issued, bonded) = match extra { - BondExtra::FreeBalance(amount) => { - (bonded_pool.try_bond_funds(&who, amount, BondType::Later)?, amount) - }, - BondExtra::Rewards => { - (bonded_pool.try_bond_funds(&who, claimed, BondType::Later)?, claimed) - }, + BondExtra::FreeBalance(amount) => + (bonded_pool.try_bond_funds(&who, amount, BondType::Later)?, amount), + BondExtra::Rewards => + (bonded_pool.try_bond_funds(&who, claimed, BondType::Later)?, claimed), }; bonded_pool.ok_to_be_open()?; @@ -2137,7 +2134,7 @@ impl Pallet { let current_reward_counter = reward_pool .current_reward_counter(pool_member.pool_id, bonded_pool.points) .ok()?; - return pool_member.pending_rewards(current_reward_counter).ok(); + return pool_member.pending_rewards(current_reward_counter).ok() } } @@ -2279,7 +2276,7 @@ impl Pallet { let balance = |x| T::U256ToBalance::convert(x); if current_balance.is_zero() || current_points.is_zero() || points.is_zero() { // There is nothing to unbond - return Zero::zero(); + return Zero::zero() } // Equivalent of (current_balance / current_points) * points @@ -2307,7 +2304,7 @@ impl Pallet { let pending_rewards = member.pending_rewards(current_reward_counter)?; if pending_rewards.is_zero() { - return Ok(pending_rewards); + return Ok(pending_rewards) } // IFF the reward is non-zero alter the member and reward pool info. @@ -2441,7 +2438,7 @@ impl Pallet { #[cfg(any(feature = "try-runtime", feature = "fuzzing", test, debug_assertions))] pub fn do_try_state(level: u8) -> Result<(), &'static str> { if level.is_zero() { - return Ok(()); + return Ok(()) } // note: while a bit wacky, since they have the same key, even collecting to vec should // result in the same set of keys, in the same order. 
@@ -2493,8 +2490,8 @@ impl Pallet { RewardPool::::current_balance(id) ); assert!( - RewardPool::::current_balance(id) - >= pools_members_pending_rewards.get(&id).map(|x| *x).unwrap_or_default() + RewardPool::::current_balance(id) >= + pools_members_pending_rewards.get(&id).map(|x| *x).unwrap_or_default() ) }); @@ -2509,8 +2506,8 @@ impl Pallet { let depositor = PoolMembers::::get(&bonded_pool.roles.depositor).unwrap(); assert!( - bonded_pool.is_destroying_and_only_depositor(depositor.active_points()) - || depositor.active_points() >= MinCreateBond::::get(), + bonded_pool.is_destroying_and_only_depositor(depositor.active_points()) || + depositor.active_points() >= MinCreateBond::::get(), "depositor must always have MinCreateBond stake in the pool, except for when the \ pool is being destroyed and the depositor is the last member", ); @@ -2518,7 +2515,7 @@ impl Pallet { assert!(MaxPoolMembers::::get().map_or(true, |max| all_members <= max)); if level <= 1 { - return Ok(()); + return Ok(()) } for (pool_id, _pool) in BondedPools::::iter() { diff --git a/frame/nomination-pools/src/migration.rs b/frame/nomination-pools/src/migration.rs index 2af575a378f34..b73141c95f72c 100644 --- a/frame/nomination-pools/src/migration.rs +++ b/frame/nomination-pools/src/migration.rs @@ -219,14 +219,14 @@ pub mod v2 { Some(x) => x, None => { log!(error, "pool {} has no member! deleting it..", id); - return None; + return None }, }; let bonded_pool = match BondedPools::::get(id) { Some(x) => x, None => { log!(error, "pool {} has no bonded pool! deleting it..", id); - return None; + return None }, }; @@ -241,7 +241,7 @@ pub mod v2 { Some(x) => x, None => { log!(error, "pool {} for member {:?} does not exist!", id, who); - return None; + return None }, }; @@ -351,8 +351,8 @@ pub mod v2 { // all reward accounts must have more than ED. 
RewardPools::::iter().for_each(|(id, _)| { assert!( - T::Currency::free_balance(&Pallet::::create_reward_account(id)) - >= T::Currency::minimum_balance() + T::Currency::free_balance(&Pallet::::create_reward_account(id)) >= + T::Currency::minimum_balance() ) }); diff --git a/frame/preimage/src/lib.rs b/frame/preimage/src/lib.rs index 44afd90dca36e..6549832c11f5d 100644 --- a/frame/preimage/src/lib.rs +++ b/frame/preimage/src/lib.rs @@ -206,7 +206,7 @@ impl Pallet { origin: T::RuntimeOrigin, ) -> Result, BadOrigin> { if T::ManagerOrigin::ensure_origin(origin.clone()).is_ok() { - return Ok(None); + return Ok(None) } let who = ensure_signed(origin)?; Ok(Some(who)) @@ -230,15 +230,12 @@ impl Pallet { // We take a deposit only if there is a provided depositor and the preimage was not // previously requested. This also allows the tx to pay no fee. let status = match (StatusFor::::get(hash), maybe_depositor) { - (Some(RequestStatus::Requested { count, deposit, .. }), _) => { - RequestStatus::Requested { count, deposit, len: Some(len) } - }, - (Some(RequestStatus::Unrequested { .. }), Some(_)) => { - return Err(Error::::AlreadyNoted.into()) - }, - (Some(RequestStatus::Unrequested { len, deposit }), None) => { - RequestStatus::Requested { deposit: Some(deposit), count: 1, len: Some(len) } - }, + (Some(RequestStatus::Requested { count, deposit, .. }), _) => + RequestStatus::Requested { count, deposit, len: Some(len) }, + (Some(RequestStatus::Unrequested { .. 
}), Some(_)) => + return Err(Error::::AlreadyNoted.into()), + (Some(RequestStatus::Unrequested { len, deposit }), None) => + RequestStatus::Requested { deposit: Some(deposit), count: 1, len: Some(len) }, (None, None) => RequestStatus::Requested { count: 1, len: Some(len), deposit: None }, (None, Some(depositor)) => { let length = preimage.len() as u32; diff --git a/frame/preimage/src/migration.rs b/frame/preimage/src/migration.rs index 2acfdb54718d1..a5d15c23c758a 100644 --- a/frame/preimage/src/migration.rs +++ b/frame/preimage/src/migration.rs @@ -94,7 +94,7 @@ pub mod v1 { "skipping MovePreimagesIntoBuckets: executed on wrong storage version.\ Expected version 0" ); - return weight; + return weight } let status = v0::StatusFor::::drain().collect::>(); @@ -108,7 +108,7 @@ pub mod v1 { preimage } else { log::error!(target: TARGET, "preimage not found for hash {:?}", &hash); - continue; + continue }; let len = preimage.len() as u32; if len > MAX_SIZE { @@ -118,24 +118,22 @@ pub mod v1 { &hash, len ); - continue; + continue } let status = match status { v0::RequestStatus::Unrequested(deposit) => match deposit { Some(deposit) => RequestStatus::Unrequested { deposit, len }, // `None` depositor becomes system-requested. 
- None => { - RequestStatus::Requested { deposit: None, count: 1, len: Some(len) } - }, + None => + RequestStatus::Requested { deposit: None, count: 1, len: Some(len) }, }, v0::RequestStatus::Requested(count) if count == 0 => { log::error!(target: TARGET, "preimage has counter of zero: {:?}", hash); - continue; - }, - v0::RequestStatus::Requested(count) => { - RequestStatus::Requested { deposit: None, count, len: Some(len) } + continue }, + v0::RequestStatus::Requested(count) => + RequestStatus::Requested { deposit: None, count, len: Some(len) }, }; log::trace!(target: TARGET, "Moving preimage {:?} with len {}", hash, len); diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index e5c7c7fdc3be8..5c07a2b012243 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -499,9 +499,9 @@ pub mod pallet { let call_hash = T::CallHasher::hash_of(&call); let now = system::Pallet::::block_number(); Self::edit_announcements(&delegate, |ann| { - ann.real != real - || ann.call_hash != call_hash - || now.saturating_sub(ann.height) < def.delay + ann.real != real || + ann.call_hash != call_hash || + now.saturating_sub(ann.height) < def.delay }) .map_err(|_| Error::::Unannounced)?; @@ -758,8 +758,8 @@ impl Pallet { force_proxy_type: Option, ) -> Result, DispatchError> { let f = |x: &ProxyDefinition| -> bool { - &x.delegate == delegate - && force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y) + &x.delegate == delegate && + force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y) }; Ok(Proxies::::get(real).0.into_iter().find(f).ok_or(Error::::NotProxy)?) } @@ -777,19 +777,15 @@ impl Pallet { match c.is_sub_type() { // Proxy call cannot add or remove a proxy with more permissions than it already // has. - Some(Call::add_proxy { ref proxy_type, .. }) - | Some(Call::remove_proxy { ref proxy_type, .. }) + Some(Call::add_proxy { ref proxy_type, .. }) | + Some(Call::remove_proxy { ref proxy_type, .. 
}) if !def.proxy_type.is_superset(proxy_type) => - { - false - }, + false, // Proxy call cannot remove all proxies or kill pure proxies unless it has full // permissions. Some(Call::remove_proxies { .. }) | Some(Call::kill_pure { .. }) if def.proxy_type != T::ProxyType::default() => - { - false - }, + false, _ => def.proxy_type.filter(c), } }); diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index 2bf7f1948fb68..33aed2704918c 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -577,9 +577,8 @@ pub mod pallet { poll, |mut status| -> Result<(TallyOf, VoteRecord), DispatchError> { match status { - PollStatus::None | PollStatus::Completed(..) => { - Err(Error::::NotPolling)? - }, + PollStatus::None | PollStatus::Completed(..) => + Err(Error::::NotPolling)?, PollStatus::Ongoing(ref mut tally, class) => { match Voting::::get(&poll, &who) { Some(Aye(votes)) => { @@ -635,7 +634,7 @@ pub mod pallet { ); if r.unique == 0 { // return Err(Error::::NoneRemaining) - return Ok(Pays::Yes.into()); + return Ok(Pays::Yes.into()) } if let Some(cursor) = r.maybe_cursor { VotingCleanup::::insert(poll_index, BoundedVec::truncate_from(cursor)); diff --git a/frame/ranked-collective/src/tests.rs b/frame/ranked-collective/src/tests.rs index 5596f44f594d0..68bb79f3d07f7 100644 --- a/frame/ranked-collective/src/tests.rs +++ b/frame/ranked-collective/src/tests.rs @@ -120,9 +120,8 @@ impl Polling> for TestPolls { let mut polls = Polls::get(); let entry = polls.get_mut(&index); let r = match entry { - Some(Ongoing(ref mut tally_mut_ref, class)) => { - f(PollStatus::Ongoing(tally_mut_ref, *class)) - }, + Some(Ongoing(ref mut tally_mut_ref, class)) => + f(PollStatus::Ongoing(tally_mut_ref, *class)), Some(Completed(when, succeeded)) => f(PollStatus::Completed(*when, *succeeded)), None => f(PollStatus::None), }; @@ -138,9 +137,8 @@ impl Polling> for TestPolls { let mut polls = Polls::get(); let entry = polls.get_mut(&index); 
let r = match entry { - Some(Ongoing(ref mut tally_mut_ref, class)) => { - f(PollStatus::Ongoing(tally_mut_ref, *class)) - }, + Some(Ongoing(ref mut tally_mut_ref, class)) => + f(PollStatus::Ongoing(tally_mut_ref, *class)), Some(Completed(when, succeeded)) => f(PollStatus::Completed(*when, *succeeded)), None => f(PollStatus::None), }?; diff --git a/frame/referenda/src/branch.rs b/frame/referenda/src/branch.rs index 67527458b047b..d3744979fc547 100644 --- a/frame/referenda/src/branch.rs +++ b/frame/referenda/src/branch.rs @@ -113,17 +113,17 @@ impl ServiceBranch { NotQueued => T::WeightInfo::place_decision_deposit_not_queued(), BeginDecidingPassing => T::WeightInfo::place_decision_deposit_passing(), BeginDecidingFailing => T::WeightInfo::place_decision_deposit_failing(), - BeginConfirming - | ContinueConfirming - | EndConfirming - | ContinueNotConfirming - | Approved - | Rejected - | RequeuedInsertion - | RequeuedSlide - | TimedOut - | Fail - | NoDeposit => return None, + BeginConfirming | + ContinueConfirming | + EndConfirming | + ContinueNotConfirming | + Approved | + Rejected | + RequeuedInsertion | + RequeuedSlide | + TimedOut | + Fail | + NoDeposit => return None, }; Some(ref_time_weight) diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index 40f2c33c7bdd3..ba5f4aec956b1 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -755,8 +755,8 @@ impl, I: 'static> Pallet { when: T::BlockNumber, ) -> Option<(T::BlockNumber, ScheduleAddressOf)> { let alarm_interval = T::AlarmInterval::get().max(One::one()); - let when = when.saturating_add(alarm_interval).saturating_sub(One::one()) - / (alarm_interval.saturating_mul(alarm_interval)).max(One::one()); + let when = when.saturating_add(alarm_interval).saturating_sub(One::one()) / + (alarm_interval.saturating_mul(alarm_interval)).max(One::one()); let maybe_result = T::Scheduler::schedule( DispatchTime::At(when), None, @@ -872,7 +872,7 @@ impl, I: 'static> Pallet { Ok(c) => c, Err(_) 
=> { debug_assert!(false, "Unable to create a bounded call from `one_fewer_deciding`??",); - return; + return }, }; let maybe_result = T::Scheduler::schedule( @@ -911,7 +911,7 @@ impl, I: 'static> Pallet { false, "Unable to create a bounded call from `nudge_referendum`??", ); - return false; + return false }, }; status.alarm = Self::set_alarm(call, alarm); @@ -1012,7 +1012,7 @@ impl, I: 'static> Pallet { ), true, ServiceBranch::TimedOut, - ); + ) } }, Some(deciding) => { @@ -1044,7 +1044,7 @@ impl, I: 'static> Pallet { ), true, ServiceBranch::Approved, - ); + ) }, Some(_) => ServiceBranch::ContinueConfirming, None => { @@ -1069,7 +1069,7 @@ impl, I: 'static> Pallet { ), true, ServiceBranch::Rejected, - ); + ) } if deciding.confirming.is_some() { // Stop confirming @@ -1159,7 +1159,7 @@ impl, I: 'static> Pallet { id: TrackIdOf, ) -> bool { let x = Perbill::from_rational(elapsed.min(period), period); - support_needed.passing(x, tally.support(id)) - && approval_needed.passing(x, tally.approval(id)) + support_needed.passing(x, tally.support(id)) && + approval_needed.passing(x, tally.approval(id)) } } diff --git a/frame/referenda/src/types.rs b/frame/referenda/src/types.rs index dc2aa5fb8fd53..a97faca3bbfc2 100644 --- a/frame/referenda/src/types.rs +++ b/frame/referenda/src/types.rs @@ -251,9 +251,8 @@ impl< Ongoing(x) if x.decision_deposit.is_none() => Ok(None), // Cannot refund deposit if Ongoing as this breaks assumptions. Ongoing(_) => Err(()), - Approved(_, _, d) | Rejected(_, _, d) | TimedOut(_, _, d) | Cancelled(_, _, d) => { - Ok(d.take()) - }, + Approved(_, _, d) | Rejected(_, _, d) | TimedOut(_, _, d) | Cancelled(_, _, d) => + Ok(d.take()), Killed(_) => Ok(None), } } @@ -412,12 +411,10 @@ impl Curve { /// Determine the `y` value for the given `x` value. 
pub(crate) fn threshold(&self, x: Perbill) -> Perbill { match self { - Self::LinearDecreasing { length, floor, ceil } => { - *ceil - (x.min(*length).saturating_div(*length, Down) * (*ceil - *floor)) - }, - Self::SteppedDecreasing { begin, end, step, period } => { - (*begin - (step.int_mul(x.int_div(*period))).min(*begin)).max(*end) - }, + Self::LinearDecreasing { length, floor, ceil } => + *ceil - (x.min(*length).saturating_div(*length, Down) * (*ceil - *floor)), + Self::SteppedDecreasing { begin, end, step, period } => + (*begin - (step.int_mul(x.int_div(*period))).min(*begin)).max(*end), Self::Reciprocal { factor, x_offset, y_offset } => factor .checked_rounding_div(FixedI64::from(x) + *x_offset, Low) .map(|yp| (yp + *y_offset).into_clamped_perthing()) @@ -456,22 +453,20 @@ impl Curve { /// ``` pub fn delay(&self, y: Perbill) -> Perbill { match self { - Self::LinearDecreasing { length, floor, ceil } => { + Self::LinearDecreasing { length, floor, ceil } => if y < *floor { Perbill::one() } else if y > *ceil { Perbill::zero() } else { (*ceil - y).saturating_div(*ceil - *floor, Up).saturating_mul(*length) - } - }, - Self::SteppedDecreasing { begin, end, step, period } => { + }, + Self::SteppedDecreasing { begin, end, step, period } => if y < *end { Perbill::one() } else { period.int_mul((*begin - y.min(*begin) + step.less_epsilon()).int_div(*step)) - } - }, + }, Self::Reciprocal { factor, x_offset, y_offset } => { let y = FixedI64::from(y); let maybe_term = factor.checked_rounding_div(y - *y_offset, High); diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 27fffe2f134ec..e621c913b2386 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -97,11 +97,11 @@ fn make_call(maybe_lookup_len: Option) -> Bounded<: Some(x) => x, None => { len -= 1; - continue; + continue }, }; if c.lookup_needed() == maybe_lookup_len.is_some() { - break c; + break c } if maybe_lookup_len.is_some() { len += 1; @@ 
-109,7 +109,7 @@ fn make_call(maybe_lookup_len: Option) -> Bounded<: if len > 0 { len -= 1; } else { - break c; + break c } } } diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 965080b36bca2..78533540be98f 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -602,7 +602,7 @@ impl> Pallet { &h, &err ); - return None; + return None } weight.saturating_accrue(T::DbWeight::get().reads(1)); log::info!("Migrated call by hash, hash: {:?}", h); @@ -704,7 +704,7 @@ impl Pallet { }; if when <= now { - return Err(Error::::TargetBlockNumberInPast.into()); + return Err(Error::::TargetBlockNumberInPast.into()) } Ok(when) @@ -738,7 +738,7 @@ impl Pallet { agenda[hole_index] = Some(what); hole_index as u32 } else { - return Err((DispatchError::Exhausted, what)); + return Err((DispatchError::Exhausted, what)) } }; Agenda::::insert(when, agenda); @@ -783,7 +783,7 @@ impl Pallet { T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), Some(Ordering::Less) | None ) { - return Err(BadOrigin.into()); + return Err(BadOrigin.into()) } }; Ok(s.take()) @@ -798,7 +798,7 @@ impl Pallet { Self::deposit_event(Event::Canceled { when, index }); Ok(()) } else { - return Err(Error::::NotFound.into()); + return Err(Error::::NotFound.into()) } } @@ -809,7 +809,7 @@ impl Pallet { let new_time = Self::resolve_time(new_time)?; if new_time == when { - return Err(Error::::RescheduleNoChange.into()); + return Err(Error::::RescheduleNoChange.into()) } let task = Agenda::::try_mutate(when, |agenda| { @@ -832,7 +832,7 @@ impl Pallet { ) -> Result, DispatchError> { // ensure id it is unique if Lookup::::contains_key(&id) { - return Err(Error::::FailedToSchedule.into()); + return Err(Error::::FailedToSchedule.into()) } let when = Self::resolve_time(when)?; @@ -865,7 +865,7 @@ impl Pallet { T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), Some(Ordering::Less) | None ) { - return Err(BadOrigin.into()); + return Err(BadOrigin.into()) } T::Preimages::drop(&s.call); } 
@@ -876,7 +876,7 @@ impl Pallet { Self::deposit_event(Event::Canceled { when, index }); Ok(()) } else { - return Err(Error::::NotFound.into()); + return Err(Error::::NotFound.into()) } }) } @@ -891,7 +891,7 @@ impl Pallet { let (when, index) = lookup.ok_or(Error::::NotFound)?; if new_time == when { - return Err(Error::::RescheduleNoChange.into()); + return Err(Error::::RescheduleNoChange.into()) } let task = Agenda::::try_mutate(when, |agenda| { @@ -915,7 +915,7 @@ impl Pallet { /// Service up to `max` agendas queue starting from earliest incompletely executed agenda. fn service_agendas(weight: &mut WeightMeter, now: T::BlockNumber, max: u32) { if !weight.check_accrue(T::WeightInfo::service_agendas_base()) { - return; + return } let mut incomplete_since = now + One::one(); @@ -977,7 +977,7 @@ impl Pallet { ); if !weight.can_accrue(base_weight) { postponed += 1; - break; + break } let result = Self::service_task(weight, now, when, agenda_index, *executed == 0, task); agenda[agenda_index as usize] = match result { @@ -1103,15 +1103,14 @@ impl Pallet { let max_weight = base_weight.saturating_add(call_weight); if !weight.can_accrue(max_weight) { - return Err(Overweight); + return Err(Overweight) } let dispatch_origin = origin.into(); let (maybe_actual_call_weight, result) = match call.dispatch(dispatch_origin) { Ok(post_info) => (post_info.actual_weight, Ok(())), - Err(error_and_info) => { - (error_and_info.post_info.actual_weight, Err(error_and_info.error)) - }, + Err(error_and_info) => + (error_and_info.post_info.actual_weight, Err(error_and_info.error)), }; let call_weight = maybe_actual_call_weight.unwrap_or(call_weight); weight.check_accrue(base_weight); diff --git a/frame/scheduler/src/migration.rs b/frame/scheduler/src/migration.rs index d38eaa367c45f..6769d20023196 100644 --- a/frame/scheduler/src/migration.rs +++ b/frame/scheduler/src/migration.rs @@ -125,7 +125,7 @@ pub mod v3 { agenda.len(), max_scheduled_per_block, ); - return Err("Agenda would overflow 
`MaxScheduledPerBlock`."); + return Err("Agenda would overflow `MaxScheduledPerBlock`.") } } // Check that bounding the calls will not overflow `MAX_LENGTH`. @@ -142,7 +142,7 @@ pub mod v3 { block_number, l, ); - return Err("Call is too large."); + return Err("Call is too large.") } }, _ => (), @@ -162,7 +162,7 @@ pub mod v3 { Expected version 3, found {:?}", version, ); - return T::DbWeight::get().reads(1); + return T::DbWeight::get().reads(1) } crate::Pallet::::migrate_v3_to_v4() diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index 92a77011d3ae1..033d787946709 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -627,11 +627,11 @@ fn on_initialize_weight_is_correct() { // Will include the named periodic only assert_eq!( Scheduler::on_initialize(1), - TestWeightInfo::service_agendas_base() - + TestWeightInfo::service_agenda_base(1) - + ::service_task(None, true, true) - + TestWeightInfo::execute_dispatch_unsigned() - + call_weight + Weight::from_ref_time(4) + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(1) + + ::service_task(None, true, true) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(4) ); assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32)]); @@ -639,14 +639,14 @@ fn on_initialize_weight_is_correct() { // Will include anon and anon periodic assert_eq!( Scheduler::on_initialize(2), - TestWeightInfo::service_agendas_base() - + TestWeightInfo::service_agenda_base(2) - + ::service_task(None, false, true) - + TestWeightInfo::execute_dispatch_unsigned() - + call_weight + Weight::from_ref_time(3) - + ::service_task(None, false, false) - + TestWeightInfo::execute_dispatch_unsigned() - + call_weight + Weight::from_ref_time(2) + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(2) + + ::service_task(None, false, true) + + TestWeightInfo::execute_dispatch_unsigned() + + 
call_weight + Weight::from_ref_time(3) + + ::service_task(None, false, false) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(2) ); assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); @@ -654,11 +654,11 @@ fn on_initialize_weight_is_correct() { // Will include named only assert_eq!( Scheduler::on_initialize(3), - TestWeightInfo::service_agendas_base() - + TestWeightInfo::service_agenda_base(1) - + ::service_task(None, true, false) - + TestWeightInfo::execute_dispatch_unsigned() - + call_weight + Weight::from_ref_time(1) + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(1) + + ::service_task(None, true, false) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(1) ); assert_eq!(IncompleteSince::::get(), None); assert_eq!( diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index 424f9952d2d92..a015c1c568153 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -455,12 +455,10 @@ impl, I: 'static> Pallet { >::put(&new_members_bounded); match notify { - ChangeReceiver::MembershipInitialized => { - T::MembershipInitialized::initialize_members(&new_members_bounded) - }, - ChangeReceiver::MembershipChanged => { - T::MembershipChanged::set_members_sorted(&new_members_bounded[..], &old_members[..]) - }, + ChangeReceiver::MembershipInitialized => + T::MembershipInitialized::initialize_members(&new_members_bounded), + ChangeReceiver::MembershipChanged => + T::MembershipChanged::set_members_sorted(&new_members_bounded[..], &old_members[..]), } } diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 153dc28b8b2ba..45b4ba3c0a799 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -106,7 +106,7 @@ impl Pallet { let up_to = sp_std::cmp::min(up_to, end); if up_to < start { 
- return; // out of bounds. harmless. + return // out of bounds. harmless. } (start..up_to).for_each(::HistoricalSessions::remove); @@ -352,7 +352,7 @@ impl> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet>::validators().len() as ValidatorCount; if count != proof.validator_count { - return None; + return None } Some((owner, id)) @@ -362,7 +362,7 @@ impl> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet>::get(&proof.session)?; if count != proof.validator_count { - return None; + return None } let trie = ProvingTrie::::from_nodes(root, &proof.trie_nodes); diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index a882528ca4fff..7b97a20860175 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -681,7 +681,7 @@ impl Pallet { let mut now_session_keys = session_keys.iter(); let mut check_next_changed = |keys: &T::Keys| { if changed { - return; + return } // since a new validator set always leads to `changed` starting // as true, we can ensure that `now_session_keys` and `next_validators` @@ -717,14 +717,14 @@ impl Pallet { /// Disable the validator of index `i`, returns `false` if the validator was already disabled. 
pub fn disable_index(i: u32) -> bool { if i >= Validators::::decode_len().unwrap_or(0) as u32 { - return false; + return false } >::mutate(|disabled| { if let Err(index) = disabled.binary_search(&i) { disabled.insert(index, i); T::SessionHandler::on_disabled(i); - return true; + return true } false @@ -839,7 +839,7 @@ impl Pallet { if let Some(old) = old_keys.as_ref().map(|k| k.get_raw(*id)) { if key == old { - continue; + continue } Self::clear_key_owner(*id, old); diff --git a/frame/session/src/migrations/v1.rs b/frame/session/src/migrations/v1.rs index d58d9daef418b..c0dce422fe8b5 100644 --- a/frame/session/src/migrations/v1.rs +++ b/frame/session/src/migrations/v1.rs @@ -47,7 +47,7 @@ pub fn migrate::on_chain_storage_version(); @@ -104,7 +104,7 @@ pub fn pre_migrate< log_migration("pre-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name); if new_pallet_name == OLD_PREFIX { - return; + return } let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); @@ -145,7 +145,7 @@ pub fn post_migrate< log_migration("post-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name); if new_pallet_name == OLD_PREFIX { - return; + return } // Assert that no `HistoricalSessions` and `StoredRange` storages remains at the old prefix. 
diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index c7490276c2768..aa13eacba9564 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -122,8 +122,8 @@ pub struct TestShouldEndSession; impl ShouldEndSession for TestShouldEndSession { fn should_end_session(now: u64) -> bool { let l = SessionLength::get(); - now % l == 0 - || ForceSessionEnd::mutate(|l| { + now % l == 0 || + ForceSessionEnd::mutate(|l| { let r = *l; *l = false; r diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 1d37dd2daaeff..73a09490ea579 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -1001,7 +1001,7 @@ pub mod pallet { } else { >::insert(&who, payouts); } - return Ok(()); + return Ok(()) } } Err(Error::::NoPayout.into()) @@ -1202,8 +1202,8 @@ pub mod pallet { // Reduce next pot by payout >::put(pot - value); // Add payout for new candidate - let maturity = >::block_number() - + Self::lock_duration(Self::members().len() as u32); + let maturity = >::block_number() + + Self::lock_duration(Self::members().len() as u32); Self::pay_accepted_candidate(&who, value, kind, maturity); }, Judgement::Reject => { @@ -1236,7 +1236,7 @@ pub mod pallet { // Remove suspended candidate >::remove(who); } else { - return Err(Error::::NotSuspended.into()); + return Err(Error::::NotSuspended.into()) } Ok(()) } @@ -1433,8 +1433,8 @@ impl, I: 'static> Pallet { // out of society. members.reserve(candidates.len()); - let maturity = >::block_number() - + Self::lock_duration(members.len() as u32); + let maturity = >::block_number() + + Self::lock_duration(members.len() as u32); let mut rewardees = Vec::new(); let mut total_approvals = 0; @@ -1621,7 +1621,7 @@ impl, I: 'static> Pallet { // whole slash is accounted for. 
*amount -= rest; rest = Zero::zero(); - break; + break } } >::insert(who, &payouts[dropped..]); @@ -1793,7 +1793,7 @@ impl, I: 'static> Pallet { selected.push(bid.clone()); zero_selected = true; count += 1; - return false; + return false } } else { total_cost += bid.value; @@ -1801,7 +1801,7 @@ impl, I: 'static> Pallet { if total_cost <= pot { selected.push(bid.clone()); count += 1; - return false; + return false } } } diff --git a/frame/staking/reward-curve/src/lib.rs b/frame/staking/reward-curve/src/lib.rs index f1d327a2338d7..e1ea8aa7b15d5 100644 --- a/frame/staking/reward-curve/src/lib.rs +++ b/frame/staking/reward-curve/src/lib.rs @@ -135,10 +135,10 @@ struct Bounds { impl Bounds { fn check(&self, value: u32) -> bool { - let wrong = (self.min_strict && value <= self.min) - || (!self.min_strict && value < self.min) - || (self.max_strict && value >= self.max) - || (!self.max_strict && value > self.max); + let wrong = (self.min_strict && value <= self.min) || + (!self.min_strict && value < self.min) || + (self.max_strict && value >= self.max) || + (!self.max_strict && value > self.max); !wrong } @@ -174,7 +174,7 @@ fn parse_field( value, bounds, ), - )); + )) } Ok(value) @@ -195,7 +195,7 @@ impl Parse for INposInput { ::parse(input)?; if !input.is_empty() { - return Err(input.error("expected end of input stream, no token expected")); + return Err(input.error("expected end of input stream, no token expected")) } let min_inflation = parse_field::( @@ -230,7 +230,7 @@ impl Parse for INposInput { >::parse(&args_input)?; if !args_input.is_empty() { - return Err(args_input.error("expected end of input stream, no token expected")); + return Err(args_input.error("expected end of input stream, no token expected")) } Ok(Self { @@ -272,7 +272,7 @@ impl INPoS { // See web3 docs for the details fn compute_opposite_after_x_ideal(&self, y: u32) -> u32 { if y == self.i_0 { - return u32::MAX; + return u32::MAX } // Note: the log term calculated here represents a per_million value 
let log = log2(self.i_ideal_times_x_ideal - self.i_0, y - self.i_0); @@ -290,8 +290,8 @@ fn compute_points(input: &INposInput) -> Vec<(u32, u32)> { // For each point p: (next_p.0 - p.0) < segment_length && (next_p.1 - p.1) < segment_length. // This ensures that the total number of segment doesn't overflow max_piece_count. - let max_length = (input.max_inflation - input.min_inflation + 1_000_000 - inpos.x_ideal) - / (input.max_piece_count - 1); + let max_length = (input.max_inflation - input.min_inflation + 1_000_000 - inpos.x_ideal) / + (input.max_piece_count - 1); let mut delta_y = max_length; let mut y = input.max_inflation; @@ -303,29 +303,29 @@ fn compute_points(input: &INposInput) -> Vec<(u32, u32)> { if next_y <= input.min_inflation { delta_y = delta_y.saturating_sub(1); - continue; + continue } let next_x = inpos.compute_opposite_after_x_ideal(next_y); if (next_x - points.last().unwrap().0) > max_length { delta_y = delta_y.saturating_sub(1); - continue; + continue } if next_x >= 1_000_000 { let prev = points.last().unwrap(); // Compute the y corresponding to x=1_000_000 using the this point and the previous one. 
- let delta_y: u32 = ((next_x - 1_000_000) as u64 * (prev.1 - next_y) as u64 - / (next_x - prev.0) as u64) + let delta_y: u32 = ((next_x - 1_000_000) as u64 * (prev.1 - next_y) as u64 / + (next_x - prev.0) as u64) .try_into() .unwrap(); let y = next_y + delta_y; points.push((1_000_000, y)); - return points; + return points } points.push((next_x, next_y)); y = next_y; diff --git a/frame/staking/reward-curve/src/log.rs b/frame/staking/reward-curve/src/log.rs index 6c8c662755b69..248a1e3c36a6e 100644 --- a/frame/staking/reward-curve/src/log.rs +++ b/frame/staking/reward-curve/src/log.rs @@ -39,7 +39,7 @@ pub fn log2(p: u32, q: u32) -> u32 { // log2(1) = 0 if p == q { - return 0; + return 0 } // find the power of 2 where q * 2^n <= p < q * 2^(n+1) @@ -59,7 +59,7 @@ pub fn log2(p: u32, q: u32) -> u32 { loop { let term = taylor_term(k, y_num.into(), y_den.into()); if term == 0 { - break; + break } res += term; diff --git a/frame/staking/reward-fn/src/lib.rs b/frame/staking/reward-fn/src/lib.rs index 3c672eec0adce..cc9919c28cce3 100644 --- a/frame/staking/reward-fn/src/lib.rs +++ b/frame/staking/reward-fn/src/lib.rs @@ -54,12 +54,12 @@ use sp_arithmetic::{ pub fn compute_inflation(stake: P, ideal_stake: P, falloff: P) -> P { if stake < ideal_stake { // ideal_stake is more than 0 because it is strictly more than stake - return stake / ideal_stake; + return stake / ideal_stake } if falloff < P::from_percent(1.into()) { log::error!("Invalid inflation computation: falloff less than 1% is not supported"); - return PerThing::zero(); + return PerThing::zero() } let accuracy = { @@ -130,7 +130,7 @@ fn compute_taylor_serie_part(p: &INPoSParam) -> BigUint { last_taylor_term = compute_taylor_term(k, &last_taylor_term, p); if last_taylor_term.is_zero() { - break; + break } let last_taylor_term_positive = k % 2 == 0; @@ -153,7 +153,7 @@ fn compute_taylor_serie_part(p: &INPoSParam) -> BigUint { } if !taylor_sum_positive { - return BigUint::zero(); + return BigUint::zero() } 
taylor_sum.lstrip(); @@ -195,15 +195,15 @@ fn div_by_stripped(mut a: BigUint, b: &BigUint) -> BigUint { if b.len() == 0 { log::error!("Computation error: Invalid division"); - return BigUint::zero(); + return BigUint::zero() } if b.len() == 1 { - return a.div_unit(b.checked_get(0).unwrap_or(1)); + return a.div_unit(b.checked_get(0).unwrap_or(1)) } if b.len() > a.len() { - return BigUint::zero(); + return BigUint::zero() } if b.len() == a.len() { @@ -217,7 +217,7 @@ fn div_by_stripped(mut a: BigUint, b: &BigUint) -> BigUint { .map(|res| res.0) .unwrap_or_else(BigUint::zero) .div_unit(100_000) - .div_unit(100_000); + .div_unit(100_000) } a.div(b, false).map(|res| res.0).unwrap_or_else(BigUint::zero) diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index b3fcb72a73d77..dcb861e2ce419 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -51,7 +51,7 @@ type MaxNominators = <::BenchmarkingConfig as BenchmarkingConfig // read and write operations. 
pub fn add_slashing_spans(who: &T::AccountId, spans: u32) { if spans == 0 { - return; + return } // For the first slashing span, we initialize diff --git a/frame/staking/src/inflation.rs b/frame/staking/src/inflation.rs index 4f9d4f2501d82..c7519683c75d1 100644 --- a/frame/staking/src/inflation.rs +++ b/frame/staking/src/inflation.rs @@ -42,8 +42,8 @@ where const MILLISECONDS_PER_YEAR: u64 = 1000 * 3600 * 24 * 36525 / 100; let portion = Perbill::from_rational(era_duration as u64, MILLISECONDS_PER_YEAR); - let payout = portion - * yearly_inflation + let payout = portion * + yearly_inflation .calculate_for_fraction_times_denominator(npos_token_staked, total_tokens.clone()); let maximum = portion * (yearly_inflation.maximum * total_tokens); (payout, maximum) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index baaf82b023e7c..0f5b8e0123ab6 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -532,7 +532,7 @@ impl StakingLedger { } if unlocking_balance >= value { - break; + break } } @@ -569,7 +569,7 @@ impl StakingLedger { slash_era: EraIndex, ) -> BalanceOf { if slash_amount.is_zero() { - return Zero::zero(); + return Zero::zero() } use sp_runtime::PerThing as _; @@ -663,7 +663,7 @@ impl StakingLedger { let mut slashed_unlocking = BTreeMap::<_, _>::new(); for i in slash_chunks_priority { if remaining_slash.is_zero() { - break; + break } if let Some(chunk) = self.unlocking.get_mut(i).defensive() { @@ -671,7 +671,7 @@ impl StakingLedger { // write the new slashed value of this chunk to the map. 
slashed_unlocking.insert(chunk.era, chunk.value); } else { - break; + break } } diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index 3031900629a1b..f2ccb4f8b096f 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -125,9 +125,7 @@ pub mod v11 { warn, "new bags-list name is equal to the old one, only bumping the version" ); - return T::DbWeight::get() - .reads(1) - .saturating_add(T::DbWeight::get().writes(1)); + return T::DbWeight::get().reads(1).saturating_add(T::DbWeight::get().writes(1)) } move_pallet(old_pallet_name.as_bytes(), new_pallet_name.as_bytes()); @@ -150,7 +148,7 @@ pub mod v11 { // skip storage prefix checks for the same pallet names if new_pallet_name == old_pallet_name { - return Ok(()); + return Ok(()) } let old_pallet_prefix = twox_128(N::get().as_bytes()); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index ad230914693d5..16e4e5ddd7aa2 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -712,9 +712,9 @@ pub(crate) fn on_offence_in_era( for &(bonded_era, start_session) in bonded_eras.iter() { if bonded_era == era { let _ = Staking::on_offence(offenders, slash_fraction, start_session, disable_strategy); - return; + return } else if bonded_era > era { - break; + break } } diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 1aa5ff41e0420..c22a2bd2d1f77 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -125,10 +125,9 @@ impl Pallet { .retain(|&x| x >= current_era.saturating_sub(history_depth)); match ledger.claimed_rewards.binary_search(&era) { - Ok(_) => { + Ok(_) => return Err(Error::::AlreadyClaimed - .with_weight(T::WeightInfo::payout_stakers_alive_staked(0))) - }, + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0))), Err(pos) => ledger .claimed_rewards .try_insert(pos, era) @@ -161,7 +160,7 @@ impl Pallet { // Nothing to do if they have no reward 
points. if validator_reward_points.is_zero() { - return Ok(Some(T::WeightInfo::payout_stakers_alive_staked(0)).into()); + return Ok(Some(T::WeightInfo::payout_stakers_alive_staked(0)).into()) } // This is the fraction of the total reward that the validator and the @@ -261,9 +260,8 @@ impl Pallet { Self::update_ledger(&controller, &l); r }), - RewardDestination::Account(dest_account) => { - Some(T::Currency::deposit_creating(&dest_account, amount)) - }, + RewardDestination::Account(dest_account) => + Some(T::Currency::deposit_creating(&dest_account, amount)), RewardDestination::None => None, } } @@ -293,14 +291,14 @@ impl Pallet { _ => { // Either `Forcing::ForceNone`, // or `Forcing::NotForcing if era_length >= T::SessionsPerEra::get()`. - return None; + return None }, } // New era. let maybe_new_era_validators = Self::try_trigger_new_era(session_index, is_genesis); - if maybe_new_era_validators.is_some() - && matches!(ForceEra::::get(), Forcing::ForceNew) + if maybe_new_era_validators.is_some() && + matches!(ForceEra::::get(), Forcing::ForceNew) { ForceEra::::put(Forcing::NotForcing); } @@ -506,7 +504,7 @@ impl Pallet { } Self::deposit_event(Event::StakingElectionFailed); - return None; + return None } Self::deposit_event(Event::StakersElected); @@ -728,8 +726,8 @@ impl Pallet { let mut nominators_taken = 0u32; let mut sorted_voters = T::VoterList::iter(); - while all_voters.len() < max_allowed_len - && voters_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * max_allowed_len as u32) + while all_voters.len() < max_allowed_len && + voters_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * max_allowed_len as u32) { let voter = match sorted_voters.next() { Some(voter) => { @@ -806,8 +804,8 @@ impl Pallet { let mut targets_seen = 0; let mut targets_iter = T::TargetList::iter(); - while all_targets.len() < max_allowed_len - && targets_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * max_allowed_len as u32) + while all_targets.len() < max_allowed_len && + targets_seen < 
(NPOS_MAX_ITERATIONS_COEFFICIENT * max_allowed_len as u32) { let target = match targets_iter.next() { Some(target) => { @@ -954,7 +952,7 @@ impl ElectionDataProvider for Pallet { // We can't handle this case yet -- return an error. if maybe_max_len.map_or(false, |max_len| target_count > max_len as u32) { - return Err("Target snapshot too big"); + return Err("Target snapshot too big") } Ok(Self::get_npos_targets(None)) @@ -1232,7 +1230,7 @@ where add_db_reads_writes(1, 0); if active_era.is_none() { // This offence need not be re-submitted. - return consumed_weight; + return consumed_weight } active_era.expect("value checked not to be `None`; qed").index }; @@ -1273,7 +1271,7 @@ where // Skip if the validator is invulnerable. if invulnerables.contains(stash) { - continue; + continue } let unapplied = slashing::compute_slash::(slashing::SlashParams { @@ -1647,8 +1645,8 @@ impl Pallet { fn check_count() -> Result<(), &'static str> { ensure!( - ::VoterList::count() - == Nominators::::count() + Validators::::count(), + ::VoterList::count() == + Nominators::::count() + Validators::::count(), "wrong external count" ); @@ -1672,10 +1670,9 @@ impl Pallet { ErasStakers::::iter_prefix_values(era) .map(|expo| { ensure!( - expo.total - == expo.own - + expo - .others + expo.total == + expo.own + + expo.others .iter() .map(|e| e.value) .fold(Zero::zero(), |acc, x| acc + x), diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 7b4fb122ea40e..8fddba2150370 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -649,8 +649,8 @@ pub mod pallet { _ => Ok(()), }); assert!( - ValidatorCount::::get() - <= ::MaxWinners::get() + ValidatorCount::::get() <= + ::MaxWinners::get() ); } @@ -790,8 +790,8 @@ pub mod pallet { // ensure election results are always bounded with the same value assert!( - ::MaxWinners::get() - == ::MaxWinners::get() + ::MaxWinners::get() == + ::MaxWinners::get() ); sp_std::if_std! 
{ @@ -841,18 +841,18 @@ pub mod pallet { let stash = ensure_signed(origin)?; if >::contains_key(&stash) { - return Err(Error::::AlreadyBonded.into()); + return Err(Error::::AlreadyBonded.into()) } let controller = T::Lookup::lookup(controller)?; if >::contains_key(&controller) { - return Err(Error::::AlreadyPaired.into()); + return Err(Error::::AlreadyPaired.into()) } // Reject a bond which is considered to be _dust_. if value < T::Currency::minimum_balance() { - return Err(Error::::InsufficientBond.into()); + return Err(Error::::InsufficientBond.into()) } frame_system::Pallet::::inc_consumers(&stash).map_err(|_| Error::::BadState)?; @@ -1044,23 +1044,24 @@ pub mod pallet { ledger = ledger.consolidate_unlocked(current_era) } - let post_info_weight = - if ledger.unlocking.is_empty() && ledger.active < T::Currency::minimum_balance() { - // This account must have called `unbond()` with some value that caused the active - // portion to fall below existential deposit + will have no more unlocking chunks - // left. We can now safely remove all staking-related information. - Self::kill_stash(&stash, num_slashing_spans)?; - // Remove the lock. - T::Currency::remove_lock(STAKING_ID, &stash); - // This is worst case scenario, so we use the full weight and return None - None - } else { - // This was the consequence of a partial unbond. just update the ledger and move on. - Self::update_ledger(&controller, &ledger); + let post_info_weight = if ledger.unlocking.is_empty() && + ledger.active < T::Currency::minimum_balance() + { + // This account must have called `unbond()` with some value that caused the active + // portion to fall below existential deposit + will have no more unlocking chunks + // left. We can now safely remove all staking-related information. + Self::kill_stash(&stash, num_slashing_spans)?; + // Remove the lock. 
+ T::Currency::remove_lock(STAKING_ID, &stash); + // This is worst case scenario, so we use the full weight and return None + None + } else { + // This was the consequence of a partial unbond. just update the ledger and move on. + Self::update_ledger(&controller, &ledger); - // This is only an update, so we use less overall weight. - Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)) - }; + // This is only an update, so we use less overall weight. + Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)) + }; // `old_total` should never be less than the new total because // `consolidate_unlocked` strictly subtracts balance. @@ -1250,7 +1251,7 @@ pub mod pallet { let old_controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; let controller = T::Lookup::lookup(controller)?; if >::contains_key(&controller) { - return Err(Error::::AlreadyPaired.into()); + return Err(Error::::AlreadyPaired.into()) } if controller != old_controller { >::insert(&stash, &controller); @@ -1550,8 +1551,8 @@ pub mod pallet { let _ = ensure_signed(origin)?; let ed = T::Currency::minimum_balance(); - let reapable = T::Currency::total_balance(&stash) < ed - || Self::ledger(Self::bonded(stash.clone()).ok_or(Error::::NotStash)?) + let reapable = T::Currency::total_balance(&stash) < ed || + Self::ledger(Self::bonded(stash.clone()).ok_or(Error::::NotStash)?) 
.map(|l| l.total) .unwrap_or_default() < ed; ensure!(reapable, Error::::FundedTarget); @@ -1705,7 +1706,7 @@ pub mod pallet { if Nominators::::contains_key(&stash) && Nominators::::get(&stash).is_none() { Self::chill_stash(&stash); - return Ok(()); + return Ok(()) } if caller != controller { diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index b2c8e22850006..a1900136d64fd 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -124,7 +124,7 @@ impl SlashingSpans { pub(crate) fn end_span(&mut self, now: EraIndex) -> bool { let next_start = now + 1; if next_start <= self.last_start { - return false; + return false } let last_length = next_start - self.last_start; @@ -236,7 +236,7 @@ pub(crate) fn compute_slash( // kick out the validator even if they won't be slashed, // as long as the misbehavior is from their most recent slashing span. kick_out_if_recent::(params); - return None; + return None } let (prior_slash_p, _era_slash) = @@ -259,7 +259,7 @@ pub(crate) fn compute_slash( // pays out some reward even if the latest report is not max-in-era. // we opt to avoid the nominator lookups and edits and leave more rewards // for more drastic misbehavior. - return None; + return None } // apply slash to validator. @@ -542,7 +542,7 @@ impl<'a, T: 'a + Config> Drop for InspectingSpans<'a, T> { fn drop(&mut self) { // only update on disk if we slashed this account. if !self.dirty { - return; + return } if let Some((start, end)) = self.spans.prune(self.window_start) { @@ -672,7 +672,7 @@ fn pay_reporters( // nobody to pay out to or nothing to pay; // just treat the whole value as slashed. T::Slash::on_unbalanced(slashed_imbalance); - return; + return } // take rewards out of the slashed imbalance. 
diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index c3157d948dd11..6609b9087637d 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -325,9 +325,9 @@ fn rewards_should_work() { assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); assert_eq_error_rate!( Balances::total_balance(&100), - init_balance_100 - + part_for_100_from_10 * total_payout_0 * 2 / 3 - + part_for_100_from_20 * total_payout_0 * 1 / 3, + init_balance_100 + + part_for_100_from_10 * total_payout_0 * 2 / 3 + + part_for_100_from_20 * total_payout_0 * 1 / 3, 2 ); assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); @@ -367,9 +367,9 @@ fn rewards_should_work() { assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); assert_eq_error_rate!( Balances::total_balance(&100), - init_balance_100 - + part_for_100_from_10 * (total_payout_0 * 2 / 3 + total_payout_1) - + part_for_100_from_20 * total_payout_0 * 1 / 3, + init_balance_100 + + part_for_100_from_10 * (total_payout_0 * 2 / 3 + total_payout_1) + + part_for_100_from_20 * total_payout_0 * 1 / 3, 2 ); assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 463642a5cb7e8..5255d4f6f3800 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -244,13 +244,13 @@ pub mod pallet { if limits.item.is_zero() || limits.size.is_zero() { // handle this minor edge case, else we would call `migrate_tick` at least once. log!(warn, "limits are zero. stopping"); - return Ok(()); + return Ok(()) } while !self.exhausted(limits) && !self.finished() { if let Err(e) = self.migrate_tick() { log!(error, "migrate_until_exhaustion failed: {:?}", e); - return Err(e); + return Err(e) } } @@ -327,7 +327,7 @@ pub mod pallet { _ => { // defensive: there must be an ongoing top migration. 
frame_support::defensive!("cannot migrate child key."); - return Ok(()); + return Ok(()) }, }; @@ -369,7 +369,7 @@ pub mod pallet { Progress::Complete => { // defensive: there must be an ongoing top migration. frame_support::defensive!("cannot migrate top key."); - return Ok(()); + return Ok(()) }, }; @@ -621,7 +621,7 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); Self::deposit_event(Event::::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); - return Ok(().into()); + return Ok(().into()) } Self::deposit_event(Event::::Migrated { @@ -1682,7 +1682,7 @@ pub(crate) mod remote_tests { let ((finished, weight), proof) = ext.execute_and_prove(|| { let weight = run_to_block::(now + One::one()).1; if StateTrieMigration::::migration_process().finished() { - return (true, weight); + return (true, weight) } duration += One::one(); now += One::one(); @@ -1709,7 +1709,7 @@ pub(crate) mod remote_tests { ext.commit_all().unwrap(); if finished { - break; + break } } diff --git a/frame/support/procedural/src/clone_no_bound.rs b/frame/support/procedural/src/clone_no_bound.rs index eaf18a68ace1f..bd2741c0d47ab 100644 --- a/frame/support/procedural/src/clone_no_bound.rs +++ b/frame/support/procedural/src/clone_no_bound.rs @@ -92,7 +92,7 @@ pub fn derive_clone_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke }, syn::Data::Union(_) => { let msg = "Union type not supported by `derive(CloneNoBound)`"; - return syn::Error::new(input.span(), msg).to_compile_error().into(); + return syn::Error::new(input.span(), msg).to_compile_error().into() }, }; diff --git a/frame/support/procedural/src/construct_runtime/expand/event.rs b/frame/support/procedural/src/construct_runtime/expand/event.rs index 9239731878ae2..b11fcef1bfd53 100644 --- a/frame/support/procedural/src/construct_runtime/expand/event.rs +++ b/frame/support/procedural/src/construct_runtime/expand/event.rs @@ -44,7 +44,7 @@ pub fn expand_outer_event( be 
constructed: pallet `{}` must have generic `Event`", pallet_name, ); - return Err(syn::Error::new(pallet_name.span(), msg)); + return Err(syn::Error::new(pallet_name.span(), msg)) } let part_is_generic = !generics.params.is_empty(); diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index a2a115b082c29..1551d85ea4c96 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -45,7 +45,7 @@ pub fn expand_outer_origin( be constructed: pallet `{}` must have generic `Origin`", name ); - return Err(syn::Error::new(name.span(), msg)); + return Err(syn::Error::new(name.span(), msg)) } caller_variants.extend(expand_origin_caller_variant( diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 9f61aa4a45dbf..9e22037a6782e 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -169,12 +169,10 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { let definition = syn::parse_macro_input!(input as RuntimeDeclaration); let res = match definition { - RuntimeDeclaration::Implicit(implicit_def) => { - construct_runtime_intermediary_expansion(input_copy.into(), implicit_def) - }, - RuntimeDeclaration::Explicit(explicit_decl) => { - construct_runtime_final_expansion(explicit_decl) - }, + RuntimeDeclaration::Implicit(implicit_def) => + construct_runtime_intermediary_expansion(input_copy.into(), implicit_def), + RuntimeDeclaration::Explicit(explicit_decl) => + construct_runtime_final_expansion(explicit_decl), }; res.unwrap_or_else(|e| e.to_compile_error()).into() @@ -232,7 +230,7 @@ fn construct_runtime_final_expansion( return Err(syn::Error::new( system_pallet.name.span(), "`System` pallet declaration is feature gated, please remove any `#[cfg]` 
attributes", - )); + )) } let features = pallets diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 3865cb6befb4f..7a5acf43b92b0 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -88,21 +88,19 @@ impl Parse for RuntimeDeclaration { let pallets_token = pallets.token; match convert_pallets(pallets.content.inner.into_iter().collect())? { - PalletsConversion::Implicit(pallets) => { + PalletsConversion::Implicit(pallets) => Ok(RuntimeDeclaration::Implicit(ImplicitRuntimeDeclaration { name, where_section, pallets, - })) - }, - PalletsConversion::Explicit(pallets) => { + })), + PalletsConversion::Explicit(pallets) => Ok(RuntimeDeclaration::Explicit(ExplicitRuntimeDeclaration { name, where_section, pallets, pallets_token, - })) - }, + })), } } } @@ -123,9 +121,9 @@ impl Parse for WhereSection { definitions.push(definition); if !input.peek(Token![,]) { if !input.peek(token::Brace) { - return Err(input.error("Expected `,` or `{`")); + return Err(input.error("Expected `,` or `{`")) } - break; + break } input.parse::()?; } @@ -138,7 +136,7 @@ impl Parse for WhereSection { "`{:?}` was declared above. 
Please use exactly one declaration for `{:?}`.", kind, kind ); - return Err(Error::new(*kind_span, msg)); + return Err(Error::new(*kind_span, msg)) } Ok(Self { block, node_block, unchecked_extrinsic }) } @@ -168,7 +166,7 @@ impl Parse for WhereDefinition { } else if lookahead.peek(keyword::UncheckedExtrinsic) { (input.parse::()?.span(), WhereKind::UncheckedExtrinsic) } else { - return Err(lookahead.error()); + return Err(lookahead.error()) }; Ok(Self { @@ -229,12 +227,12 @@ impl Parse for PalletDeclaration { let res = Some(input.parse()?); let _: Token![>] = input.parse()?; res - } else if !(input.peek(Token![::]) && input.peek3(token::Brace)) - && !input.peek(keyword::exclude_parts) - && !input.peek(keyword::use_parts) - && !input.peek(Token![=]) - && !input.peek(Token![,]) - && !input.is_empty() + } else if !(input.peek(Token![::]) && input.peek3(token::Brace)) && + !input.peek(keyword::exclude_parts) && + !input.peek(keyword::use_parts) && + !input.peek(Token![=]) && + !input.peek(Token![,]) && + !input.is_empty() { return Err(input.error( "Unexpected tokens, expected one of `::$ident` `::{`, `exclude_parts`, `use_parts`, `=`, `,`", @@ -247,15 +245,15 @@ impl Parse for PalletDeclaration { let pallet_parts = if input.peek(Token![::]) && input.peek3(token::Brace) { let _: Token![::] = input.parse()?; Some(parse_pallet_parts(input)?) - } else if !input.peek(keyword::exclude_parts) - && !input.peek(keyword::use_parts) - && !input.peek(Token![=]) - && !input.peek(Token![,]) - && !input.is_empty() + } else if !input.peek(keyword::exclude_parts) && + !input.peek(keyword::use_parts) && + !input.peek(Token![=]) && + !input.peek(Token![,]) && + !input.is_empty() { return Err(input.error( "Unexpected tokens, expected one of `::{`, `exclude_parts`, `use_parts`, `=`, `,`", - )); + )) } else { None }; @@ -268,7 +266,7 @@ impl Parse for PalletDeclaration { let _: keyword::use_parts = input.parse()?; SpecifiedParts::Use(parse_pallet_parts_no_generic(input)?) 
} else if !input.peek(Token![=]) && !input.peek(Token![,]) && !input.is_empty() { - return Err(input.error("Unexpected tokens, expected one of `exclude_parts`, `=`, `,`")); + return Err(input.error("Unexpected tokens, expected one of `exclude_parts`, `=`, `,`")) } else { SpecifiedParts::All }; @@ -280,7 +278,7 @@ impl Parse for PalletDeclaration { let index = index.base10_parse::()?; Some(index) } else if !input.peek(Token![,]) && !input.is_empty() { - return Err(input.error("Unexpected tokens, expected one of `=`, `,`")); + return Err(input.error("Unexpected tokens, expected one of `=`, `,`")) } else { None }; @@ -316,15 +314,15 @@ impl Parse for PalletPath { PalletPath { inner: Path { leading_colon: None, segments: Punctuated::new() } }; let lookahead = input.lookahead1(); - if lookahead.peek(Token![crate]) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Ident) + if lookahead.peek(Token![crate]) || + lookahead.peek(Token![self]) || + lookahead.peek(Token![super]) || + lookahead.peek(Ident) { let ident = input.call(Ident::parse_any)?; res.inner.segments.push(ident.into()); } else { - return Err(lookahead.error()); + return Err(lookahead.error()) } while input.peek(Token![::]) && input.peek3(Ident) { @@ -355,7 +353,7 @@ fn parse_pallet_parts(input: ParseStream) -> Result> { "`{}` was already declared before. 
Please remove the duplicate declaration", part.name(), ); - return Err(Error::new(part.keyword.span(), msg)); + return Err(Error::new(part.keyword.span(), msg)) } } @@ -460,7 +458,7 @@ impl Parse for PalletPart { keyword.name(), valid_generics, ); - return Err(syn::Error::new(keyword.span(), msg)); + return Err(syn::Error::new(keyword.span(), msg)) } Ok(Self { keyword, generics }) @@ -521,7 +519,7 @@ fn parse_pallet_parts_no_generic(input: ParseStream) -> Result) -> syn::Result { if pallets.iter().any(|pallet| pallet.pallet_parts.is_none()) { - return Ok(PalletsConversion::Implicit(pallets)); + return Ok(PalletsConversion::Implicit(pallets)) } let mut indices = HashMap::new(); @@ -602,7 +600,7 @@ fn convert_pallets(pallets: Vec) -> syn::Result) -> syn::Result) -> syn::Result { + SpecifiedParts::Exclude(parts) | SpecifiedParts::Use(parts) => for part in parts { if !available_parts.contains(part.keyword.name()) { let msg = format!( @@ -636,10 +634,9 @@ fn convert_pallets(pallets: Vec) -> syn::Result (), } @@ -663,7 +660,7 @@ fn convert_pallets(pallets: Vec) -> syn::Result Error { /// Implementation of the `crate_to_crate_version!` macro. 
pub fn crate_to_crate_version(input: proc_macro::TokenStream) -> Result { if !input.is_empty() { - return Err(create_error("No arguments expected!")); + return Err(create_error("No arguments expected!")) } let major_version = get_cargo_env_var::("CARGO_PKG_VERSION_MAJOR") diff --git a/frame/support/procedural/src/debug_no_bound.rs b/frame/support/procedural/src/debug_no_bound.rs index dae23c539d528..56168edb87e83 100644 --- a/frame/support/procedural/src/debug_no_bound.rs +++ b/frame/support/procedural/src/debug_no_bound.rs @@ -106,7 +106,7 @@ pub fn derive_debug_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke }, syn::Data::Union(_) => { let msg = "Union type not supported by `derive(DebugNoBound)`"; - return syn::Error::new(input.span(), msg).to_compile_error().into(); + return syn::Error::new(input.span(), msg).to_compile_error().into() }, }; diff --git a/frame/support/procedural/src/dummy_part_checker.rs b/frame/support/procedural/src/dummy_part_checker.rs index 0377fb04e778d..792b17a8f7758 100644 --- a/frame/support/procedural/src/dummy_part_checker.rs +++ b/frame/support/procedural/src/dummy_part_checker.rs @@ -5,7 +5,7 @@ pub fn generate_dummy_part_checker(input: TokenStream) -> TokenStream { if !input.is_empty() { return syn::Error::new(proc_macro2::Span::call_site(), "No arguments expected") .to_compile_error() - .into(); + .into() } let count = COUNTER.with(|counter| counter.borrow_mut().inc()); diff --git a/frame/support/procedural/src/key_prefix.rs b/frame/support/procedural/src/key_prefix.rs index 086c9f8c8dcf6..05582f1297eed 100644 --- a/frame/support/procedural/src/key_prefix.rs +++ b/frame/support/procedural/src/key_prefix.rs @@ -23,7 +23,7 @@ const MAX_IDENTS: usize = 18; pub fn impl_key_prefix_for_tuples(input: proc_macro::TokenStream) -> Result { if !input.is_empty() { - return Err(syn::Error::new(Span::call_site(), "No arguments expected")); + return Err(syn::Error::new(Span::call_site(), "No arguments expected")) } let mut 
all_trait_impls = TokenStream::new(); diff --git a/frame/support/procedural/src/match_and_insert.rs b/frame/support/procedural/src/match_and_insert.rs index 97630ccbae57c..79d1da7549c1d 100644 --- a/frame/support/procedural/src/match_and_insert.rs +++ b/frame/support/procedural/src/match_and_insert.rs @@ -64,14 +64,14 @@ impl syn::parse::Parse for MatchAndInsertDef { let pattern = pattern.parse::()?.into_iter().collect::>(); if let Some(t) = pattern.iter().find(|t| matches!(t, TokenTree::Group(_))) { - return Err(syn::Error::new(t.span(), "Unexpected group token tree")); + return Err(syn::Error::new(t.span(), "Unexpected group token tree")) } if let Some(t) = pattern.iter().find(|t| matches!(t, TokenTree::Literal(_))) { - return Err(syn::Error::new(t.span(), "Unexpected literal token tree")); + return Err(syn::Error::new(t.span(), "Unexpected literal token tree")) } if pattern.is_empty() { - return Err(syn::Error::new(Span::call_site(), "empty match pattern is invalid")); + return Err(syn::Error::new(Span::call_site(), "empty match pattern is invalid")) } let mut tokens; @@ -116,7 +116,7 @@ fn expand_in_stream( Ok(s) => { extended.extend(once(TokenTree::Group(Group::new(group.delimiter(), s)))); extended.extend(stream); - return Ok(extended); + return Ok(extended) }, Err(_) => { extended.extend(once(TokenTree::Group(group))); @@ -132,7 +132,7 @@ fn expand_in_stream( extended .extend(once(tokens.take().expect("tokens is used to replace only once"))); extended.extend(stream); - return Ok(extended); + return Ok(extended) } }, } diff --git a/frame/support/procedural/src/pallet/expand/error.rs b/frame/support/procedural/src/pallet/expand/error.rs index 2df42097f6df9..5a8487b09de5c 100644 --- a/frame/support/procedural/src/pallet/expand/error.rs +++ b/frame/support/procedural/src/pallet/expand/error.rs @@ -51,7 +51,7 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { } pub use #error_token_unique_id as tt_error_token; - }; + } }; let error_ident = 
&error.error; diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs index f9ce2b4992c3c..abed680eb245e 100644 --- a/frame/support/procedural/src/pallet/expand/event.rs +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -55,7 +55,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { #[doc(hidden)] pub use #macro_ident as is_event_part_defined; } - }; + } }; let event_where_clause = &event.where_clause; diff --git a/frame/support/procedural/src/pallet/expand/genesis_build.rs b/frame/support/procedural/src/pallet/expand/genesis_build.rs index 96ffdad8daab7..d19476779011b 100644 --- a/frame/support/procedural/src/pallet/expand/genesis_build.rs +++ b/frame/support/procedural/src/pallet/expand/genesis_build.rs @@ -24,7 +24,7 @@ pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { let genesis_config = if let Some(genesis_config) = &def.genesis_config { genesis_config } else { - return Default::default(); + return Default::default() }; let genesis_build = def.genesis_build.as_ref().expect("Checked by def parser"); diff --git a/frame/support/procedural/src/pallet/expand/genesis_config.rs b/frame/support/procedural/src/pallet/expand/genesis_config.rs index 4b2ff94c6887f..739e85e0d1ced 100644 --- a/frame/support/procedural/src/pallet/expand/genesis_config.rs +++ b/frame/support/procedural/src/pallet/expand/genesis_config.rs @@ -71,7 +71,7 @@ pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { #[doc(hidden)] pub use #std_macro_ident as is_std_enabled_for_genesis; } - }; + } }; let frame_support = &def.frame_support; @@ -82,9 +82,9 @@ pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { let serde_crate = format!("{}::serde", frame_support); match genesis_config_item { - syn::Item::Enum(syn::ItemEnum { attrs, .. }) - | syn::Item::Struct(syn::ItemStruct { attrs, .. }) - | syn::Item::Type(syn::ItemType { attrs, .. 
}) => { + syn::Item::Enum(syn::ItemEnum { attrs, .. }) | + syn::Item::Struct(syn::ItemStruct { attrs, .. }) | + syn::Item::Type(syn::ItemType { attrs, .. }) => { if get_doc_literals(attrs).is_empty() { attrs.push(syn::parse_quote!( #[doc = r" diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index cc656c342d254..9109640966969 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -62,7 +62,7 @@ fn check_prefix_duplicates( if let Some(other_dup_err) = used_prefixes.insert(prefix.clone(), dup_err.clone()) { let mut err = dup_err; err.combine(other_dup_err); - return Err(err); + return Err(err) } if let Metadata::CountedMap { .. } = storage_def.metadata { @@ -79,7 +79,7 @@ fn check_prefix_duplicates( if let Some(other_dup_err) = used_prefixes.insert(counter_prefix, counter_dup_err.clone()) { let mut err = counter_dup_err; err.combine(other_dup_err); - return Err(err); + return Err(err) } } @@ -152,7 +152,7 @@ pub fn process_generics(def: &mut Def) -> syn::Result syn::Result syn::Result= args.args.len() - && matches!(storage_def.query_kind.as_ref(), Some(QueryKind::ResultQuery(_, _))) + if on_empty_idx >= args.args.len() && + matches!(storage_def.query_kind.as_ref(), Some(QueryKind::ResultQuery(_, _))) { let value_ty = match args.args[value_idx].clone() { syn::GenericArgument::Type(ty) => ty, @@ -321,7 +321,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { .filter_map(|storage_def| check_prefix_duplicates(storage_def, &mut prefix_set).err()); if let Some(mut final_error) = errors.next() { errors.for_each(|error| final_error.combine(error)); - return final_error.into_compile_error(); + return final_error.into_compile_error() } let frame_support = &def.frame_support; diff --git a/frame/support/procedural/src/pallet/mod.rs b/frame/support/procedural/src/pallet/mod.rs index 4d7b823178292..3f85be81c1f7d 
100644 --- a/frame/support/procedural/src/pallet/mod.rs +++ b/frame/support/procedural/src/pallet/mod.rs @@ -49,7 +49,7 @@ pub fn pallet( `dev_mode` attribute, such as `#[frame_support::pallet(dev_mode)]` or \ #[pallet(dev_mode)]."; let span = proc_macro2::TokenStream::from(attr).span(); - return syn::Error::new(span, msg).to_compile_error().into(); + return syn::Error::new(span, msg).to_compile_error().into() } } diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index ba9026c758425..fbca9a52c767c 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -149,7 +149,7 @@ impl CallDef { let item_impl = if let syn::Item::Impl(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")); + return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")) }; let instances = vec![ @@ -160,7 +160,7 @@ impl CallDef { if let Some((_, _, for_)) = item_impl.trait_ { let msg = "Invalid pallet::call, expected no trait ident as in \ `impl<..> Pallet<..> { .. }`"; - return Err(syn::Error::new(for_.span(), msg)); + return Err(syn::Error::new(for_.span(), msg)) } let mut methods = vec![]; @@ -177,18 +177,18 @@ impl CallDef { _ => method.vis.span(), }; - return Err(syn::Error::new(span, msg)); + return Err(syn::Error::new(span, msg)) } match method.sig.inputs.first() { None => { let msg = "Invalid pallet::call, must have at least origin arg"; - return Err(syn::Error::new(method.sig.span(), msg)); + return Err(syn::Error::new(method.sig.span(), msg)) }, Some(syn::FnArg::Receiver(_)) => { let msg = "Invalid pallet::call, first argument must be a typed argument, \ e.g. 
`origin: OriginFor`"; - return Err(syn::Error::new(method.sig.span(), msg)); + return Err(syn::Error::new(method.sig.span(), msg)) }, Some(syn::FnArg::Typed(arg)) => { check_dispatchable_first_arg_type(&arg.ty)?; @@ -200,7 +200,7 @@ impl CallDef { } else { let msg = "Invalid pallet::call, require return type \ DispatchResultWithPostInfo"; - return Err(syn::Error::new(method.sig.span(), msg)); + return Err(syn::Error::new(method.sig.span(), msg)) } let (mut weight_attrs, mut call_idx_attrs): (Vec, Vec) = @@ -228,7 +228,7 @@ impl CallDef { } else { "Invalid pallet::call, too many weight attributes given" }; - return Err(syn::Error::new(method.sig.span(), msg)); + return Err(syn::Error::new(method.sig.span(), msg)) } let weight = match weight_attrs.pop().unwrap() { FunctionAttr::Weight(w) => w, @@ -237,7 +237,7 @@ impl CallDef { if call_idx_attrs.len() > 1 { let msg = "Invalid pallet::call, too many call_index attributes given"; - return Err(syn::Error::new(method.sig.span(), msg)); + return Err(syn::Error::new(method.sig.span(), msg)) } let call_index = call_idx_attrs.pop().map(|attr| match attr { FunctionAttr::CallIndex(idx) => idx, @@ -246,12 +246,11 @@ impl CallDef { let final_index = match call_index { Some(i) => i, - None => { + None => last_index.map_or(Some(0), |idx| idx.checked_add(1)).ok_or_else(|| { let msg = "Call index doesn't fit into u8, index is 256"; syn::Error::new(method.sig.span(), msg) - })? 
- }, + })?, }; last_index = Some(final_index); @@ -262,7 +261,7 @@ impl CallDef { ); let mut err = syn::Error::new(used_fn.span(), &msg); err.combine(syn::Error::new(method.sig.ident.span(), msg)); - return Err(err); + return Err(err) } let mut args = vec![]; @@ -278,14 +277,14 @@ impl CallDef { if arg_attrs.len() > 1 { let msg = "Invalid pallet::call, argument has too many attributes"; - return Err(syn::Error::new(arg.span(), msg)); + return Err(syn::Error::new(arg.span(), msg)) } let arg_ident = if let syn::Pat::Ident(pat) = &*arg.pat { pat.ident.clone() } else { let msg = "Invalid pallet::call, argument must be ident"; - return Err(syn::Error::new(arg.pat.span(), msg)); + return Err(syn::Error::new(arg.pat.span(), msg)) }; args.push((!arg_attrs.is_empty(), arg_ident, arg.ty.clone())); @@ -303,7 +302,7 @@ impl CallDef { }); } else { let msg = "Invalid pallet::call, only method accepted"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } } diff --git a/frame/support/procedural/src/pallet/parse/config.rs b/frame/support/procedural/src/pallet/parse/config.rs index faf4d7d1acada..0f3aa69b170ce 100644 --- a/frame/support/procedural/src/pallet/parse/config.rs +++ b/frame/support/procedural/src/pallet/parse/config.rs @@ -227,7 +227,7 @@ fn check_event_type( if !type_.generics.params.is_empty() || type_.generics.where_clause.is_some() { let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must have\ no generics nor where_clause"; - return Err(syn::Error::new(trait_item.span(), msg)); + return Err(syn::Error::new(trait_item.span(), msg)) } // Check bound contains IsType and From @@ -242,7 +242,7 @@ fn check_event_type( bound: `IsType<::RuntimeEvent>`", frame_system, ); - return Err(syn::Error::new(type_.span(), msg)); + return Err(syn::Error::new(type_.span(), msg)) } let from_event_bound = type_ @@ -255,7 +255,7 @@ fn check_event_type( } else { let msg = "Invalid `type RuntimeEvent`, 
associated type `RuntimeEvent` is reserved and must \ bound: `From` or `From>` or `From>`"; - return Err(syn::Error::new(type_.span(), msg)); + return Err(syn::Error::new(type_.span(), msg)) }; if from_event_bound.is_generic && (from_event_bound.has_instance != trait_has_instance) @@ -263,7 +263,7 @@ fn check_event_type( let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` bounds inconsistent \ `From`. Config and generic Event must be both with instance or \ without instance"; - return Err(syn::Error::new(type_.span(), msg)); + return Err(syn::Error::new(type_.span(), msg)) } Ok(true) @@ -280,12 +280,10 @@ pub fn replace_self_by_t(input: proc_macro2::TokenStream) -> proc_macro2::TokenS input .into_iter() .map(|token_tree| match token_tree { - proc_macro2::TokenTree::Group(group) => { - proc_macro2::Group::new(group.delimiter(), replace_self_by_t(group.stream())).into() - }, - proc_macro2::TokenTree::Ident(ident) if ident == "Self" => { - proc_macro2::Ident::new("T", ident.span()).into() - }, + proc_macro2::TokenTree::Group(group) => + proc_macro2::Group::new(group.delimiter(), replace_self_by_t(group.stream())).into(), + proc_macro2::TokenTree::Ident(ident) if ident == "Self" => + proc_macro2::Ident::new("T", ident.span()).into(), other => other, }) .collect() @@ -302,12 +300,12 @@ impl ConfigDef { item } else { let msg = "Invalid pallet::config, expected trait definition"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; if !matches!(item.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::config, trait must be public"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } syn::parse2::(item.ident.to_token_stream())?; @@ -322,7 +320,7 @@ impl ConfigDef { if item.generics.params.len() > 1 { let msg = "Invalid pallet::config, expected no more than one generic"; - return Err(syn::Error::new(item.generics.params[2].span(), msg)); + return 
Err(syn::Error::new(item.generics.params[2].span(), msg)) } let has_instance = if item.generics.params.first().is_some() { @@ -344,7 +342,7 @@ impl ConfigDef { if type_attrs_const.len() > 1 { let msg = "Invalid attribute in pallet::config, only one attribute is expected"; - return Err(syn::Error::new(type_attrs_const[1].span(), msg)); + return Err(syn::Error::new(type_attrs_const[1].span(), msg)) } if type_attrs_const.len() == 1 { @@ -357,7 +355,7 @@ impl ConfigDef { let msg = "Invalid pallet::constant in pallet::config, expected type trait \ item"; - return Err(syn::Error::new(trait_item.span(), msg)); + return Err(syn::Error::new(trait_item.span(), msg)) }, } } @@ -394,7 +392,7 @@ impl ConfigDef { To disable this check, use `#[pallet::disable_frame_system_supertrait_check]`", frame_system, found, ); - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } Ok(Self { index, has_instance, consts_metadata, has_event_type, where_clause, attr_span }) diff --git a/frame/support/procedural/src/pallet/parse/error.rs b/frame/support/procedural/src/pallet/parse/error.rs index 15c06706e44ef..c6ce9b37c75a2 100644 --- a/frame/support/procedural/src/pallet/parse/error.rs +++ b/frame/support/procedural/src/pallet/parse/error.rs @@ -55,11 +55,11 @@ impl ErrorDef { let item = if let syn::Item::Enum(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::error, expected item enum")); + return Err(syn::Error::new(item.span(), "Invalid pallet::error, expected item enum")) }; if !matches!(item.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::error, `Error` must be public"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } let instances = @@ -67,7 +67,7 @@ impl ErrorDef { if item.generics.where_clause.is_some() { let msg = "Invalid pallet::error, where clause is not allowed on pallet error item"; - return 
Err(syn::Error::new(item.generics.where_clause.as_ref().unwrap().span(), msg)); + return Err(syn::Error::new(item.generics.where_clause.as_ref().unwrap().span(), msg)) } let error = syn::parse2::(item.ident.to_token_stream())?; @@ -85,7 +85,7 @@ impl ErrorDef { let msg = "Invalid pallet::error, unexpected discriminant, discriminants \ are not supported"; let span = variant.discriminant.as_ref().unwrap().0.span(); - return Err(syn::Error::new(span, msg)); + return Err(syn::Error::new(span, msg)) } Ok((variant.ident.clone(), field_ty, get_doc_literals(&variant.attrs))) diff --git a/frame/support/procedural/src/pallet/parse/event.rs b/frame/support/procedural/src/pallet/parse/event.rs index 5fbd2af711841..e046cacac88e8 100644 --- a/frame/support/procedural/src/pallet/parse/event.rs +++ b/frame/support/procedural/src/pallet/parse/event.rs @@ -87,7 +87,7 @@ impl PalletEventAttrInfo { if deposit_event.is_none() { deposit_event = Some(attr) } else { - return Err(syn::Error::new(attr.span, "Duplicate attribute")); + return Err(syn::Error::new(attr.span, "Duplicate attribute")) } } @@ -104,7 +104,7 @@ impl EventDef { let item = if let syn::Item::Enum(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::event, expected item enum")); + return Err(syn::Error::new(item.span(), "Invalid pallet::event, expected item enum")) }; let event_attrs: Vec = @@ -114,7 +114,7 @@ impl EventDef { if !matches!(item.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::event, `Event` must be public"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } let where_clause = item.generics.where_clause.clone(); diff --git a/frame/support/procedural/src/pallet/parse/extra_constants.rs b/frame/support/procedural/src/pallet/parse/extra_constants.rs index f9592d61f7ee1..d8622da08461b 100644 --- a/frame/support/procedural/src/pallet/parse/extra_constants.rs +++ 
b/frame/support/procedural/src/pallet/parse/extra_constants.rs @@ -84,7 +84,7 @@ impl ExtraConstantsDef { return Err(syn::Error::new( item.span(), "Invalid pallet::extra_constants, expected item impl", - )); + )) }; let instances = vec![ @@ -95,7 +95,7 @@ impl ExtraConstantsDef { if let Some((_, _, for_)) = item.trait_ { let msg = "Invalid pallet::call, expected no trait ident as in \ `impl<..> Pallet<..> { .. }`"; - return Err(syn::Error::new(for_.span(), msg)); + return Err(syn::Error::new(for_.span(), msg)) } let mut extra_constants = vec![]; @@ -104,28 +104,28 @@ impl ExtraConstantsDef { method } else { let msg = "Invalid pallet::call, only method accepted"; - return Err(syn::Error::new(impl_item.span(), msg)); + return Err(syn::Error::new(impl_item.span(), msg)) }; if !method.sig.inputs.is_empty() { let msg = "Invalid pallet::extra_constants, method must have 0 args"; - return Err(syn::Error::new(method.sig.span(), msg)); + return Err(syn::Error::new(method.sig.span(), msg)) } if !method.sig.generics.params.is_empty() { let msg = "Invalid pallet::extra_constants, method must have 0 generics"; - return Err(syn::Error::new(method.sig.generics.params[0].span(), msg)); + return Err(syn::Error::new(method.sig.generics.params[0].span(), msg)) } if method.sig.generics.where_clause.is_some() { let msg = "Invalid pallet::extra_constants, method must have no where clause"; - return Err(syn::Error::new(method.sig.generics.where_clause.span(), msg)); + return Err(syn::Error::new(method.sig.generics.where_clause.span(), msg)) } let type_ = match &method.sig.output { syn::ReturnType::Default => { let msg = "Invalid pallet::extra_constants, method must have a return type"; - return Err(syn::Error::new(method.span(), msg)); + return Err(syn::Error::new(method.span(), msg)) }, syn::ReturnType::Type(_, type_) => *type_.clone(), }; @@ -137,7 +137,7 @@ impl ExtraConstantsDef { if extra_constant_attrs.len() > 1 { let msg = "Invalid attribute in pallet::constant_name, only one 
attribute is expected"; - return Err(syn::Error::new(extra_constant_attrs[1].metadata_name.span(), msg)); + return Err(syn::Error::new(extra_constant_attrs[1].metadata_name.span(), msg)) } let metadata_name = extra_constant_attrs.pop().map(|attr| attr.metadata_name); diff --git a/frame/support/procedural/src/pallet/parse/genesis_build.rs b/frame/support/procedural/src/pallet/parse/genesis_build.rs index 6b033fdd0f331..9815b8d2203c4 100644 --- a/frame/support/procedural/src/pallet/parse/genesis_build.rs +++ b/frame/support/procedural/src/pallet/parse/genesis_build.rs @@ -40,7 +40,7 @@ impl GenesisBuildDef { item } else { let msg = "Invalid pallet::genesis_build, expected item impl"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; let item_trait = &item diff --git a/frame/support/procedural/src/pallet/parse/genesis_config.rs b/frame/support/procedural/src/pallet/parse/genesis_config.rs index e977b831cc32a..45e765c018aae 100644 --- a/frame/support/procedural/src/pallet/parse/genesis_config.rs +++ b/frame/support/procedural/src/pallet/parse/genesis_config.rs @@ -42,7 +42,7 @@ impl GenesisConfigDef { syn::Item::Struct(item) => (&item.vis, &item.ident, &item.generics), _ => { let msg = "Invalid pallet::genesis_config, expected enum or struct"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }, }; @@ -60,12 +60,12 @@ impl GenesisConfigDef { if !matches!(vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::genesis_config, GenesisConfig must be public"; - return Err(syn::Error::new(item_span, msg)); + return Err(syn::Error::new(item_span, msg)) } if ident != "GenesisConfig" { let msg = "Invalid pallet::genesis_config, ident must `GenesisConfig`"; - return Err(syn::Error::new(ident.span(), msg)); + return Err(syn::Error::new(ident.span(), msg)) } Ok(GenesisConfigDef { index, genesis_config: ident.clone(), instances, gen_kind }) diff --git 
a/frame/support/procedural/src/pallet/parse/helper.rs b/frame/support/procedural/src/pallet/parse/helper.rs index c93ac4faca9b1..8244079173581 100644 --- a/frame/support/procedural/src/pallet/parse/helper.rs +++ b/frame/support/procedural/src/pallet/parse/helper.rs @@ -153,7 +153,7 @@ impl syn::parse::Parse for Unit { syn::parenthesized!(content in input); if !content.is_empty() { let msg = "unexpected tokens, expected nothing inside parenthesis as `()`"; - return Err(syn::Error::new(content.span(), msg)); + return Err(syn::Error::new(content.span(), msg)) } Ok(Self) } @@ -166,7 +166,7 @@ impl syn::parse::Parse for StaticLifetime { let lifetime = input.parse::()?; if lifetime.ident != "static" { let msg = "unexpected tokens, expected `static`"; - return Err(syn::Error::new(lifetime.ident.span(), msg)); + return Err(syn::Error::new(lifetime.ident.span(), msg)) } Ok(Self) } @@ -264,7 +264,7 @@ pub fn check_type_def_optional_gen( impl syn::parse::Parse for Checker { fn parse(input: syn::parse::ParseStream) -> syn::Result { if input.is_empty() { - return Ok(Self(None)); + return Ok(Self(None)) } let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; @@ -272,7 +272,7 @@ pub fn check_type_def_optional_gen( input.parse::()?; if input.is_empty() { - return Ok(Self(Some(instance_usage))); + return Ok(Self(Some(instance_usage))) } let lookahead = input.lookahead1(); @@ -289,7 +289,7 @@ pub fn check_type_def_optional_gen( input.parse::()?; if input.is_empty() { - return Ok(Self(Some(instance_usage))); + return Ok(Self(Some(instance_usage))) } instance_usage.has_instance = true; @@ -431,7 +431,7 @@ pub fn check_type_def_gen( input.parse::()?; if input.is_empty() { - return Ok(Self(instance_usage)); + return Ok(Self(instance_usage)) } let lookahead = input.lookahead1(); @@ -448,7 +448,7 @@ pub fn check_type_def_gen( input.parse::()?; if input.is_empty() { - return Ok(Self(instance_usage)); + return Ok(Self(instance_usage)) } 
instance_usage.has_instance = true; @@ -539,7 +539,7 @@ pub fn check_type_value_gen( impl syn::parse::Parse for Checker { fn parse(input: syn::parse::ParseStream) -> syn::Result { if input.is_empty() { - return Ok(Self(None)); + return Ok(Self(None)) } input.parse::()?; @@ -549,7 +549,7 @@ pub fn check_type_value_gen( let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; if input.is_empty() { - return Ok(Self(Some(instance_usage))); + return Ok(Self(Some(instance_usage))) } instance_usage.has_instance = true; diff --git a/frame/support/procedural/src/pallet/parse/hooks.rs b/frame/support/procedural/src/pallet/parse/hooks.rs index 4917f26a5f01b..2dc8f4da47c5f 100644 --- a/frame/support/procedural/src/pallet/parse/hooks.rs +++ b/frame/support/procedural/src/pallet/parse/hooks.rs @@ -42,7 +42,7 @@ impl HooksDef { item } else { let msg = "Invalid pallet::hooks, expected item impl"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; let instances = vec![ @@ -67,7 +67,7 @@ impl HooksDef { quote::quote!(#item_trait) ); - return Err(syn::Error::new(item_trait.span(), msg)); + return Err(syn::Error::new(item_trait.span(), msg)) } let has_runtime_upgrade = item.items.iter().any(|i| match i { diff --git a/frame/support/procedural/src/pallet/parse/inherent.rs b/frame/support/procedural/src/pallet/parse/inherent.rs index 6b7e3a402856e..a485eed4c40d9 100644 --- a/frame/support/procedural/src/pallet/parse/inherent.rs +++ b/frame/support/procedural/src/pallet/parse/inherent.rs @@ -32,22 +32,22 @@ impl InherentDef { item } else { let msg = "Invalid pallet::inherent, expected item impl"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; if item.trait_.is_none() { let msg = "Invalid pallet::inherent, expected impl<..> ProvideInherent for Pallet<..>"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } if let 
Some(last) = item.trait_.as_ref().unwrap().1.segments.last() { if last.ident != "ProvideInherent" { let msg = "Invalid pallet::inherent, expected trait ProvideInherent"; - return Err(syn::Error::new(last.span(), msg)); + return Err(syn::Error::new(last.span(), msg)) } } else { let msg = "Invalid pallet::inherent, expected impl<..> ProvideInherent for Pallet<..>"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } let instances = vec![ diff --git a/frame/support/procedural/src/pallet/parse/mod.rs b/frame/support/procedural/src/pallet/parse/mod.rs index e484b11d54e7d..f91159248281c 100644 --- a/frame/support/procedural/src/pallet/parse/mod.rs +++ b/frame/support/procedural/src/pallet/parse/mod.rs @@ -96,9 +96,8 @@ impl Def { let pallet_attr: Option = helper::take_first_item_pallet_attr(item)?; match pallet_attr { - Some(PalletAttr::Config(span)) if config.is_none() => { - config = Some(config::ConfigDef::try_from(&frame_system, span, index, item)?) - }, + Some(PalletAttr::Config(span)) if config.is_none() => + config = Some(config::ConfigDef::try_from(&frame_system, span, index, item)?), Some(PalletAttr::Pallet(span)) if pallet_struct.is_none() => { let p = pallet_struct::PalletStructDef::try_from(span, index, item)?; pallet_struct = Some(p); @@ -107,15 +106,12 @@ impl Def { let m = hooks::HooksDef::try_from(span, index, item)?; hooks = Some(m); }, - Some(PalletAttr::RuntimeCall(span)) if call.is_none() => { - call = Some(call::CallDef::try_from(span, index, item, dev_mode)?) - }, - Some(PalletAttr::Error(span)) if error.is_none() => { - error = Some(error::ErrorDef::try_from(span, index, item)?) - }, - Some(PalletAttr::RuntimeEvent(span)) if event.is_none() => { - event = Some(event::EventDef::try_from(span, index, item)?) 
- }, + Some(PalletAttr::RuntimeCall(span)) if call.is_none() => + call = Some(call::CallDef::try_from(span, index, item, dev_mode)?), + Some(PalletAttr::Error(span)) if error.is_none() => + error = Some(error::ErrorDef::try_from(span, index, item)?), + Some(PalletAttr::RuntimeEvent(span)) if event.is_none() => + event = Some(event::EventDef::try_from(span, index, item)?), Some(PalletAttr::GenesisConfig(_)) if genesis_config.is_none() => { let g = genesis_config::GenesisConfigDef::try_from(index, item)?; genesis_config = Some(g); @@ -124,29 +120,24 @@ impl Def { let g = genesis_build::GenesisBuildDef::try_from(span, index, item)?; genesis_build = Some(g); }, - Some(PalletAttr::RuntimeOrigin(_)) if origin.is_none() => { - origin = Some(origin::OriginDef::try_from(index, item)?) - }, - Some(PalletAttr::Inherent(_)) if inherent.is_none() => { - inherent = Some(inherent::InherentDef::try_from(index, item)?) - }, - Some(PalletAttr::Storage(span)) => { - storages.push(storage::StorageDef::try_from(span, index, item, dev_mode)?) - }, + Some(PalletAttr::RuntimeOrigin(_)) if origin.is_none() => + origin = Some(origin::OriginDef::try_from(index, item)?), + Some(PalletAttr::Inherent(_)) if inherent.is_none() => + inherent = Some(inherent::InherentDef::try_from(index, item)?), + Some(PalletAttr::Storage(span)) => + storages.push(storage::StorageDef::try_from(span, index, item, dev_mode)?), Some(PalletAttr::ValidateUnsigned(_)) if validate_unsigned.is_none() => { let v = validate_unsigned::ValidateUnsignedDef::try_from(index, item)?; validate_unsigned = Some(v); }, - Some(PalletAttr::TypeValue(span)) => { - type_values.push(type_value::TypeValueDef::try_from(span, index, item)?) - }, - Some(PalletAttr::ExtraConstants(_)) => { + Some(PalletAttr::TypeValue(span)) => + type_values.push(type_value::TypeValueDef::try_from(span, index, item)?), + Some(PalletAttr::ExtraConstants(_)) => extra_constants = - Some(extra_constants::ExtraConstantsDef::try_from(index, item)?) 
- }, + Some(extra_constants::ExtraConstantsDef::try_from(index, item)?), Some(attr) => { let msg = "Invalid duplicated attribute"; - return Err(syn::Error::new(attr.span(), msg)); + return Err(syn::Error::new(attr.span(), msg)) }, None => (), } @@ -160,7 +151,7 @@ impl Def { genesis_config.as_ref().map_or("unused", |_| "used"), genesis_build.as_ref().map_or("unused", |_| "used"), ); - return Err(syn::Error::new(item_span, msg)); + return Err(syn::Error::new(item_span, msg)) } let def = Def { @@ -248,7 +239,7 @@ impl Def { let mut errors = instances.into_iter().filter_map(|instances| { if instances.has_instance == self.config.has_instance { - return None; + return None } let msg = if self.config.has_instance { "Invalid generic declaration, trait is defined with instance but generic use none" diff --git a/frame/support/procedural/src/pallet/parse/origin.rs b/frame/support/procedural/src/pallet/parse/origin.rs index 9bda22c3f2e2d..89929e3e8dbfc 100644 --- a/frame/support/procedural/src/pallet/parse/origin.rs +++ b/frame/support/procedural/src/pallet/parse/origin.rs @@ -42,7 +42,7 @@ impl OriginDef { syn::Item::Type(item) => (&item.vis, &item.ident, &item.generics), _ => { let msg = "Invalid pallet::origin, expected enum or struct or type"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }, }; @@ -59,12 +59,12 @@ impl OriginDef { if !matches!(vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::origin, Origin must be public"; - return Err(syn::Error::new(item_span, msg)); + return Err(syn::Error::new(item_span, msg)) } if ident != "Origin" { let msg = "Invalid pallet::origin, ident must `Origin`"; - return Err(syn::Error::new(ident.span(), msg)); + return Err(syn::Error::new(ident.span(), msg)) } Ok(OriginDef { index, has_instance, is_generic, instances }) diff --git a/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/frame/support/procedural/src/pallet/parse/pallet_struct.rs index 
9b79812e0a7c9..a96c310b6f1ca 100644 --- a/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -113,7 +113,7 @@ impl PalletStructDef { item } else { let msg = "Invalid pallet::pallet, expected struct definition"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; let mut store = None; @@ -138,7 +138,7 @@ impl PalletStructDef { }, attr => { let msg = "Unexpected duplicated attribute"; - return Err(syn::Error::new(attr.span(), msg)); + return Err(syn::Error::new(attr.span(), msg)) }, } } @@ -147,12 +147,12 @@ impl PalletStructDef { if !matches!(item.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::pallet, Pallet must be public"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } if item.generics.where_clause.is_some() { let msg = "Invalid pallet::pallet, where clause not supported on Pallet declaration"; - return Err(syn::Error::new(item.generics.where_clause.span(), msg)); + return Err(syn::Error::new(item.generics.where_clause.span(), msg)) } let instances = diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 35c2f5412d519..8b551ab31d6c3 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -49,10 +49,10 @@ pub enum PalletStorageAttr { impl PalletStorageAttr { fn attr_span(&self) -> proc_macro2::Span { match self { - Self::Getter(_, span) - | Self::StorageName(_, span) - | Self::Unbounded(span) - | Self::WhitelistStorage(span) => *span, + Self::Getter(_, span) | + Self::StorageName(_, span) | + Self::Unbounded(span) | + Self::WhitelistStorage(span) => *span, } } } @@ -115,17 +115,15 @@ impl PalletStorageAttrInfo { for attr in attrs { match attr { PalletStorageAttr::Getter(ident, ..) 
if getter.is_none() => getter = Some(ident), - PalletStorageAttr::StorageName(name, ..) if rename_as.is_none() => { - rename_as = Some(name) - }, + PalletStorageAttr::StorageName(name, ..) if rename_as.is_none() => + rename_as = Some(name), PalletStorageAttr::Unbounded(..) if !unbounded => unbounded = true, PalletStorageAttr::WhitelistStorage(..) if !whitelisted => whitelisted = true, - attr => { + attr => return Err(syn::Error::new( attr.attr_span(), "Invalid attribute: Duplicate attribute", - )) - }, + )), } } @@ -240,9 +238,8 @@ impl StorageGenerics { Self::Map { value, key, .. } => Metadata::Map { value, key }, Self::CountedMap { value, key, .. } => Metadata::CountedMap { value, key }, Self::Value { value, .. } => Metadata::Value { value }, - Self::NMap { keygen, value, .. } => { - Metadata::NMap { keys: collect_keys(&keygen)?, keygen, value } - }, + Self::NMap { keygen, value, .. } => + Metadata::NMap { keys: collect_keys(&keygen)?, keygen, value }, }; Ok(res) @@ -251,11 +248,11 @@ impl StorageGenerics { /// Return the query kind from the defined generics fn query_kind(&self) -> Option { match &self { - Self::DoubleMap { query_kind, .. } - | Self::Map { query_kind, .. } - | Self::CountedMap { query_kind, .. } - | Self::Value { query_kind, .. } - | Self::NMap { query_kind, .. } => query_kind.clone(), + Self::DoubleMap { query_kind, .. } | + Self::Map { query_kind, .. } | + Self::CountedMap { query_kind, .. } | + Self::Value { query_kind, .. } | + Self::NMap { query_kind, .. } => query_kind.clone(), } } } @@ -296,8 +293,8 @@ fn check_generics( }; for (gen_name, gen_binding) in map { - if !mandatory_generics.contains(&gen_name.as_str()) - && !optional_generics.contains(&gen_name.as_str()) + if !mandatory_generics.contains(&gen_name.as_str()) && + !optional_generics.contains(&gen_name.as_str()) { let msg = format!( "Invalid pallet::storage, Unexpected generic `{}` for `{}`. 
{}", @@ -342,7 +339,7 @@ fn process_named_generics( let msg = "Invalid pallet::storage, Duplicated named generic"; let mut err = syn::Error::new(arg.ident.span(), msg); err.combine(syn::Error::new(other.ident.span(), msg)); - return Err(err); + return Err(err) } parsed.insert(arg.ident.to_string(), arg.clone()); } @@ -514,9 +511,8 @@ fn process_unnamed_generics( })?; let res = match storage { - StorageKind::Value => { - (None, Metadata::Value { value: retrieve_arg(1)? }, retrieve_arg(2).ok()) - }, + StorageKind::Value => + (None, Metadata::Value { value: retrieve_arg(1)? }, retrieve_arg(2).ok()), StorageKind::Map => ( None, Metadata::Map { key: retrieve_arg(2)?, value: retrieve_arg(3)? }, @@ -563,7 +559,7 @@ fn process_generics( in order to expand metadata, found `{}`.", found, ); - return Err(syn::Error::new(segment.ident.span(), msg)); + return Err(syn::Error::new(segment.ident.span(), msg)) }, }; @@ -574,7 +570,7 @@ fn process_generics( _ => { let msg = "Invalid pallet::storage, invalid number of generic generic arguments, \ expect more that 0 generic arguments."; - return Err(syn::Error::new(segment.span(), msg)); + return Err(syn::Error::new(segment.span(), msg)) }, }; @@ -621,7 +617,7 @@ fn extract_key(ty: &syn::Type) -> syn::Result { typ } else { let msg = "Invalid pallet::storage, expected type path"; - return Err(syn::Error::new(ty.span(), msg)); + return Err(syn::Error::new(ty.span(), msg)) }; let key_struct = typ.path.segments.last().ok_or_else(|| { @@ -630,14 +626,14 @@ fn extract_key(ty: &syn::Type) -> syn::Result { })?; if key_struct.ident != "Key" && key_struct.ident != "NMapKey" { let msg = "Invalid pallet::storage, expected Key or NMapKey struct"; - return Err(syn::Error::new(key_struct.ident.span(), msg)); + return Err(syn::Error::new(key_struct.ident.span(), msg)) } let ty_params = if let syn::PathArguments::AngleBracketed(args) = &key_struct.arguments { args } else { let msg = "Invalid pallet::storage, expected angle bracketed arguments"; - 
return Err(syn::Error::new(key_struct.arguments.span(), msg)); + return Err(syn::Error::new(key_struct.arguments.span(), msg)) }; if ty_params.args.len() != 2 { @@ -646,14 +642,14 @@ fn extract_key(ty: &syn::Type) -> syn::Result { for Key struct, expected 2 args, found {}", ty_params.args.len() ); - return Err(syn::Error::new(ty_params.span(), msg)); + return Err(syn::Error::new(ty_params.span(), msg)) } let key = match &ty_params.args[1] { syn::GenericArgument::Type(key_ty) => key_ty.clone(), _ => { let msg = "Invalid pallet::storage, expected type"; - return Err(syn::Error::new(ty_params.args[1].span(), msg)); + return Err(syn::Error::new(ty_params.args[1].span(), msg)) }, }; @@ -687,7 +683,7 @@ impl StorageDef { let item = if let syn::Item::Type(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::storage, expect item type.")); + return Err(syn::Error::new(item.span(), "Invalid pallet::storage, expect item type.")) }; let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; @@ -707,12 +703,12 @@ impl StorageDef { typ } else { let msg = "Invalid pallet::storage, expected type path"; - return Err(syn::Error::new(item.ty.span(), msg)); + return Err(syn::Error::new(item.ty.span(), msg)) }; if typ.path.segments.len() != 1 { let msg = "Invalid pallet::storage, expected type path with one segment"; - return Err(syn::Error::new(item.ty.span(), msg)); + return Err(syn::Error::new(item.ty.span(), msg)) } let (named_generics, metadata, query_kind) = process_generics(&typ.path.segments[0])?; @@ -731,22 +727,16 @@ impl StorageDef { .segments .last() .map_or(false, |s| s.ident == "OptionQuery") => - { - return Ok(Some(QueryKind::OptionQuery)) - }, + return Ok(Some(QueryKind::OptionQuery)), Type::Path(TypePath { path: Path { segments, .. }, .. 
}) if segments.last().map_or(false, |s| s.ident == "ResultQuery") => - { segments .last() .expect("segments is checked to have the last value; qed") - .clone() - }, + .clone(), Type::Path(path) if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") => - { - return Ok(Some(QueryKind::ValueQuery)) - }, + return Ok(Some(QueryKind::ValueQuery)), _ => return Ok(None), }; @@ -760,7 +750,7 @@ impl StorageDef { for ResultQuery, expected 1 type argument, found {}", args.len(), ); - return Err(syn::Error::new(args.span(), msg)); + return Err(syn::Error::new(args.span(), msg)) } args[0].clone() @@ -771,7 +761,7 @@ impl StorageDef { expected angle-bracketed arguments, found `{}`", args.to_token_stream().to_string() ); - return Err(syn::Error::new(args.span(), msg)); + return Err(syn::Error::new(args.span(), msg)) }, }; @@ -787,7 +777,7 @@ impl StorageDef { segments, found {}", err_variant.len(), ); - return Err(syn::Error::new(err_variant.span(), msg)); + return Err(syn::Error::new(err_variant.span(), msg)) } let mut error = err_variant.clone(); let err_variant = error @@ -823,7 +813,7 @@ impl StorageDef { let msg = "Invalid pallet::storage, cannot generate getter because QueryKind is not \ identifiable. 
QueryKind must be `OptionQuery`, `ResultQuery`, `ValueQuery`, or default \ one to be identifiable."; - return Err(syn::Error::new(getter.span(), msg)); + return Err(syn::Error::new(getter.span(), msg)) } Ok(StorageDef { diff --git a/frame/support/procedural/src/pallet/parse/type_value.rs b/frame/support/procedural/src/pallet/parse/type_value.rs index cd1bcd0b48ee7..a3d004cd8a532 100644 --- a/frame/support/procedural/src/pallet/parse/type_value.rs +++ b/frame/support/procedural/src/pallet/parse/type_value.rs @@ -52,7 +52,7 @@ impl TypeValueDef { item } else { let msg = "Invalid pallet::type_value, expected item fn"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; let mut docs = vec![]; @@ -60,13 +60,13 @@ impl TypeValueDef { if let Ok(syn::Meta::NameValue(meta)) = attr.parse_meta() { if meta.path.get_ident().map_or(false, |ident| ident == "doc") { docs.push(meta.lit); - continue; + continue } } let msg = "Invalid pallet::type_value, unexpected attribute, only doc attribute are \ allowed"; - return Err(syn::Error::new(attr.span(), msg)); + return Err(syn::Error::new(attr.span(), msg)) } if let Some(span) = item @@ -80,12 +80,12 @@ impl TypeValueDef { .or_else(|| item.sig.variadic.as_ref().map(|t| t.span())) { let msg = "Invalid pallet::type_value, unexpected token"; - return Err(syn::Error::new(span, msg)); + return Err(syn::Error::new(span, msg)) } if !item.sig.inputs.is_empty() { let msg = "Invalid pallet::type_value, unexpected argument"; - return Err(syn::Error::new(item.sig.inputs[0].span(), msg)); + return Err(syn::Error::new(item.sig.inputs[0].span(), msg)) } let vis = item.vis.clone(); @@ -95,7 +95,7 @@ impl TypeValueDef { syn::ReturnType::Type(_, type_) => type_, syn::ReturnType::Default => { let msg = "Invalid pallet::type_value, expected return type"; - return Err(syn::Error::new(item.sig.span(), msg)); + return Err(syn::Error::new(item.sig.span(), msg)) }, }; diff --git 
a/frame/support/procedural/src/pallet/parse/validate_unsigned.rs b/frame/support/procedural/src/pallet/parse/validate_unsigned.rs index e5cda19fc0b4e..18d5a2dc4443f 100644 --- a/frame/support/procedural/src/pallet/parse/validate_unsigned.rs +++ b/frame/support/procedural/src/pallet/parse/validate_unsigned.rs @@ -32,24 +32,24 @@ impl ValidateUnsignedDef { item } else { let msg = "Invalid pallet::validate_unsigned, expected item impl"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; if item.trait_.is_none() { let msg = "Invalid pallet::validate_unsigned, expected impl<..> ValidateUnsigned for \ Pallet<..>"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } if let Some(last) = item.trait_.as_ref().unwrap().1.segments.last() { if last.ident != "ValidateUnsigned" { let msg = "Invalid pallet::validate_unsigned, expected trait ValidateUnsigned"; - return Err(syn::Error::new(last.span(), msg)); + return Err(syn::Error::new(last.span(), msg)) } } else { let msg = "Invalid pallet::validate_unsigned, expected impl<..> ValidateUnsigned for \ Pallet<..>"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } let instances = vec![ diff --git a/frame/support/procedural/src/pallet_error.rs b/frame/support/procedural/src/pallet_error.rs index 60cf4006a7b8e..216168131e43d 100644 --- a/frame/support/procedural/src/pallet_error.rs +++ b/frame/support/procedural/src/pallet_error.rs @@ -35,8 +35,8 @@ pub fn derive_pallet_error(input: proc_macro::TokenStream) -> proc_macro::TokenS let max_encoded_size = match data { syn::Data::Struct(syn::DataStruct { fields, .. }) => match fields { - syn::Fields::Named(syn::FieldsNamed { named: fields, .. }) - | syn::Fields::Unnamed(syn::FieldsUnnamed { unnamed: fields, .. }) => { + syn::Fields::Named(syn::FieldsNamed { named: fields, .. }) | + syn::Fields::Unnamed(syn::FieldsUnnamed { unnamed: fields, .. 
}) => { let maybe_field_tys = fields .iter() .map(|f| generate_field_types(f, &frame_support)) @@ -94,7 +94,7 @@ pub fn derive_pallet_error(input: proc_macro::TokenStream) -> proc_macro::TokenS }, syn::Data::Union(syn::DataUnion { union_token, .. }) => { let msg = "Cannot derive `PalletError` for union; please implement it directly"; - return syn::Error::new(union_token.span, msg).into_compile_error().into(); + return syn::Error::new(union_token.span, msg).into_compile_error().into() }, }; @@ -127,15 +127,13 @@ fn generate_field_types( { syn::NestedMeta::Meta(syn::Meta::Path(path)) if path.get_ident().map_or(false, |i| i == "skip") => - { - return Ok(None) - }, + return Ok(None), syn::NestedMeta::Meta(syn::Meta::Path(path)) if path.get_ident().map_or(false, |i| i == "compact") => { let field_ty = &field.ty; - return Ok(Some(quote::quote!(#scrate::codec::Compact<#field_ty>))); + return Ok(Some(quote::quote!(#scrate::codec::Compact<#field_ty>))) }, syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue { @@ -144,7 +142,7 @@ fn generate_field_types( .. })) if path.get_ident().map_or(false, |i| i == "encoded_as") => { let ty = proc_macro2::TokenStream::from_str(&lit_str.value())?; - return Ok(Some(ty)); + return Ok(Some(ty)) }, _ => (), @@ -175,9 +173,7 @@ fn generate_variant_field_types( { syn::NestedMeta::Meta(syn::Meta::Path(path)) if path.get_ident().map_or(false, |i| i == "skip") => - { - return Ok(None) - }, + return Ok(None), _ => (), } @@ -188,8 +184,8 @@ fn generate_variant_field_types( } match &variant.fields { - syn::Fields::Named(syn::FieldsNamed { named: fields, .. }) - | syn::Fields::Unnamed(syn::FieldsUnnamed { unnamed: fields, .. }) => { + syn::Fields::Named(syn::FieldsNamed { named: fields, .. }) | + syn::Fields::Unnamed(syn::FieldsUnnamed { unnamed: fields, .. 
}) => { let field_tys = fields .iter() .map(|field| generate_field_types(field, scrate)) diff --git a/frame/support/procedural/src/partial_eq_no_bound.rs b/frame/support/procedural/src/partial_eq_no_bound.rs index 9e5ace6225d3c..31930c0c3dae3 100644 --- a/frame/support/procedural/src/partial_eq_no_bound.rs +++ b/frame/support/procedural/src/partial_eq_no_bound.rs @@ -122,7 +122,7 @@ pub fn derive_partial_eq_no_bound(input: proc_macro::TokenStream) -> proc_macro: }, syn::Data::Union(_) => { let msg = "Union type not supported by `derive(PartialEqNoBound)`"; - return syn::Error::new(input.span(), msg).to_compile_error().into(); + return syn::Error::new(input.span(), msg).to_compile_error().into() }, }; diff --git a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs index 474235bfff3e8..d24e50096f25e 100644 --- a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs +++ b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs @@ -138,7 +138,7 @@ impl GenesisConfigDef { return Err(syn::Error::new( meta.span(), "extra genesis config items do not support `cfg` attribute", - )); + )) } Ok(meta) }) diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs index 3db2d79c98460..e8e2d7529cb3f 100644 --- a/frame/support/procedural/src/storage/mod.rs +++ b/frame/support/procedural/src/storage/mod.rs @@ -259,24 +259,20 @@ impl StorageLineDefExt { hidden_crate: &proc_macro2::TokenStream, ) -> Self { let is_generic = match &storage_def.storage_type { - StorageLineTypeDef::Simple(value) => { - ext::type_contains_ident(value, &def.module_runtime_generic) - }, - StorageLineTypeDef::Map(map) => { - ext::type_contains_ident(&map.key, &def.module_runtime_generic) - || ext::type_contains_ident(&map.value, &def.module_runtime_generic) - }, - StorageLineTypeDef::DoubleMap(map) => { - 
ext::type_contains_ident(&map.key1, &def.module_runtime_generic) - || ext::type_contains_ident(&map.key2, &def.module_runtime_generic) - || ext::type_contains_ident(&map.value, &def.module_runtime_generic) - }, - StorageLineTypeDef::NMap(map) => { + StorageLineTypeDef::Simple(value) => + ext::type_contains_ident(value, &def.module_runtime_generic), + StorageLineTypeDef::Map(map) => + ext::type_contains_ident(&map.key, &def.module_runtime_generic) || + ext::type_contains_ident(&map.value, &def.module_runtime_generic), + StorageLineTypeDef::DoubleMap(map) => + ext::type_contains_ident(&map.key1, &def.module_runtime_generic) || + ext::type_contains_ident(&map.key2, &def.module_runtime_generic) || + ext::type_contains_ident(&map.value, &def.module_runtime_generic), + StorageLineTypeDef::NMap(map) => map.keys .iter() - .any(|key| ext::type_contains_ident(key, &def.module_runtime_generic)) - || ext::type_contains_ident(&map.value, &def.module_runtime_generic) - }, + .any(|key| ext::type_contains_ident(key, &def.module_runtime_generic)) || + ext::type_contains_ident(&map.value, &def.module_runtime_generic), }; let query_type = match &storage_def.storage_type { @@ -399,7 +395,7 @@ impl NMapDef { if self.keys.len() == 1 { let hasher = &self.hashers[0].to_storage_hasher_struct(); let key = &self.keys[0]; - return quote!( #scrate::storage::types::Key<#scrate::#hasher, #key> ); + return quote!( #scrate::storage::types::Key<#scrate::#hasher, #key> ) } let key_hasher = self @@ -417,7 +413,7 @@ impl NMapDef { fn to_key_tuple(&self) -> proc_macro2::TokenStream { if self.keys.len() == 1 { let key = &self.keys[0]; - return quote!(#key); + return quote!(#key) } let tuple = self.keys.iter().map(|key| quote!(#key)).collect::>(); diff --git a/frame/support/procedural/src/storage/parse.rs b/frame/support/procedural/src/storage/parse.rs index a3b21c829eb3e..54026b7d78b19 100644 --- a/frame/support/procedural/src/storage/parse.rs +++ b/frame/support/procedural/src/storage/parse.rs @@ 
-367,17 +367,16 @@ fn get_module_instance( it is now defined at frame_support::traits::Instance. Expect `Instance` found `{}`", instantiable.as_ref().unwrap(), ); - return Err(syn::Error::new(instantiable.span(), msg)); + return Err(syn::Error::new(instantiable.span(), msg)) } match (instance, instantiable, default_instance) { - (Some(instance), Some(instantiable), default_instance) => { + (Some(instance), Some(instantiable), default_instance) => Ok(Some(super::ModuleInstanceDef { instance_generic: instance, instance_trait: instantiable, instance_default: default_instance, - })) - }, + })), (None, None, None) => Ok(None), (Some(instance), None, _) => Err(syn::Error::new( instance.span(), @@ -431,7 +430,7 @@ pub fn parse(input: syn::parse::ParseStream) -> syn::Result { - line.max_values.inner.map(|i| i.expr.content) - }, - DeclStorageType::Simple(_) => { + DeclStorageType::Map(_) | DeclStorageType::DoubleMap(_) | DeclStorageType::NMap(_) => + line.max_values.inner.map(|i| i.expr.content), + DeclStorageType::Simple(_) => if let Some(max_values) = line.max_values.inner { let msg = "unexpected max_values attribute for storage value."; let span = max_values.max_values_keyword.span(); - return Err(syn::Error::new(span, msg)); + return Err(syn::Error::new(span, msg)) } else { Some(syn::parse_quote!(1u32)) - } - }, + }, }; let span = line.storage_type.span(); @@ -527,15 +524,14 @@ fn parse_storage_line_defs( key: map.key, value: map.value, }), - DeclStorageType::DoubleMap(map) => { + DeclStorageType::DoubleMap(map) => super::StorageLineTypeDef::DoubleMap(Box::new(super::DoubleMapDef { hasher1: map.hasher1.inner.ok_or_else(no_hasher_error)?.into(), hasher2: map.hasher2.inner.ok_or_else(no_hasher_error)?.into(), key1: map.key1, key2: map.key2, value: map.value, - })) - }, + })), DeclStorageType::NMap(map) => super::StorageLineTypeDef::NMap(super::NMapDef { hashers: map .storage_keys diff --git a/frame/support/procedural/src/storage/print_pallet_upgrade.rs 
b/frame/support/procedural/src/storage/print_pallet_upgrade.rs index 49e8fd93a98b8..03f09a7edb48e 100644 --- a/frame/support/procedural/src/storage/print_pallet_upgrade.rs +++ b/frame/support/procedural/src/storage/print_pallet_upgrade.rs @@ -26,7 +26,7 @@ fn to_cleaned_string(t: impl quote::ToTokens) -> String { /// Print an incomplete upgrade from decl_storage macro to new pallet attribute. pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { if !check_print_pallet_upgrade() { - return; + return } let scrate = "e::quote!(frame_support); @@ -58,7 +58,7 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { Ok(g) => g, Err(err) => { println!("Could not print upgrade due compile error: {:?}", err); - return; + return }, }; diff --git a/frame/support/procedural/src/storage_alias.rs b/frame/support/procedural/src/storage_alias.rs index 33630b706e084..e0df0123595b9 100644 --- a/frame/support/procedural/src/storage_alias.rs +++ b/frame/support/procedural/src/storage_alias.rs @@ -294,20 +294,20 @@ impl StorageType { /// The prefix for this storage type. fn prefix(&self) -> &SimplePath { match self { - Self::Value { prefix, .. } - | Self::Map { prefix, .. } - | Self::NMap { prefix, .. } - | Self::DoubleMap { prefix, .. } => prefix, + Self::Value { prefix, .. } | + Self::Map { prefix, .. } | + Self::NMap { prefix, .. } | + Self::DoubleMap { prefix, .. } => prefix, } } /// The prefix generics for this storage type. fn prefix_generics(&self) -> Option<&TypeGenerics> { match self { - Self::Value { prefix_generics, .. } - | Self::Map { prefix_generics, .. } - | Self::NMap { prefix_generics, .. } - | Self::DoubleMap { prefix_generics, .. } => prefix_generics.as_ref(), + Self::Value { prefix_generics, .. } | + Self::Map { prefix_generics, .. } | + Self::NMap { prefix_generics, .. } | + Self::DoubleMap { prefix_generics, .. 
} => prefix_generics.as_ref(), } } } @@ -431,7 +431,7 @@ impl Parse for Input { } else if lookahead.peek(Token![=]) { None } else { - return Err(lookahead.error()); + return Err(lookahead.error()) }; let lookahead = input.lookahead1(); @@ -440,7 +440,7 @@ impl Parse for Input { } else if lookahead.peek(Token![=]) { None } else { - return Err(lookahead.error()); + return Err(lookahead.error()) }; let _equal = input.parse()?; @@ -513,7 +513,7 @@ fn generate_storage_instance( visibility: &Visibility, ) -> Result { if let Some(ident) = prefix.get_ident().filter(|i| *i == "_") { - return Err(Error::new(ident.span(), "`_` is not allowed as prefix by `storage_alias`.")); + return Err(Error::new(ident.span(), "`_` is not allowed as prefix by `storage_alias`.")) } let (pallet_prefix, impl_generics, type_generics) = @@ -541,7 +541,7 @@ fn generate_storage_instance( return Err(Error::new_spanned( prefix, "If there are no generics, the prefix is only allowed to be an identifier.", - )); + )) }; let where_clause = storage_where_clause.map(|w| quote!(#w)).unwrap_or_default(); diff --git a/frame/support/procedural/tools/src/syn_ext.rs b/frame/support/procedural/tools/src/syn_ext.rs index 31163b63fcf6c..1a2d7c1d372ad 100644 --- a/frame/support/procedural/tools/src/syn_ext.rs +++ b/frame/support/procedural/tools/src/syn_ext.rs @@ -171,7 +171,7 @@ pub fn extract_type_option(typ: &syn::Type) -> Option { // Option has only one type argument in angle bracket. if let syn::PathArguments::AngleBracketed(a) = &v.arguments { if let syn::GenericArgument::Type(typ) = a.args.last()? 
{ - return Some(typ.clone()); + return Some(typ.clone()) } } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 4e4faf28856e2..d497a672e2970 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -460,7 +460,7 @@ pub trait WeighData { impl WeighData for Weight { fn weigh_data(&self, _: T) -> Weight { - return *self; + return *self } } @@ -472,13 +472,13 @@ impl PaysFee for (Weight, DispatchClass, Pays) { impl WeighData for (Weight, DispatchClass) { fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args); + return self.0.weigh_data(args) } } impl WeighData for (Weight, DispatchClass, Pays) { fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args); + return self.0.weigh_data(args) } } @@ -496,7 +496,7 @@ impl PaysFee for (Weight, DispatchClass) { impl WeighData for (Weight, Pays) { fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args); + return self.0.weigh_data(args) } } @@ -580,13 +580,13 @@ impl PaysFee for u64 { impl WeighData for u64 { fn weigh_data(&self, _: T) -> Weight { - return Weight::zero().set_ref_time(*self); + return Weight::zero().set_ref_time(*self) } } impl WeighData for (u64, DispatchClass, Pays) { fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args); + return self.0.weigh_data(args) } } @@ -604,7 +604,7 @@ impl PaysFee for (u64, DispatchClass, Pays) { impl WeighData for (u64, DispatchClass) { fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args); + return self.0.weigh_data(args) } } @@ -622,7 +622,7 @@ impl PaysFee for (u64, DispatchClass) { impl WeighData for (u64, Pays) { fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args); + return self.0.weigh_data(args) } } @@ -3258,7 +3258,7 @@ mod tests { fn index() -> Option { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::() { - return Some(0); + return Some(0) } None @@ -3266,7 +3266,7 @@ mod tests { fn name() -> Option<&'static str> { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::() { - return Some("Test"); + return Some("Test") } None @@ -3274,7 +3274,7 @@ mod tests { fn module_name() -> Option<&'static str> { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::() { - return Some("tests"); + return Some("tests") } None @@ -3282,7 +3282,7 @@ mod tests { fn crate_version() -> Option { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::>() { - return Some(0); + return Some(0) } if type_id == sp_std::any::TypeId::of::>() { - return Some(1); + return Some(1) } if type_id == sp_std::any::TypeId::of::>() { - return Some(2); + return Some(2) } None @@ -550,13 +549,13 @@ impl frame_support::traits::PalletInfo for Runtime { fn name() -> Option<&'static str> { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::>() { - return Some("System"); + return Some("System") } if type_id == sp_std::any::TypeId::of::>() { - return Some("Timestamp"); + return Some("Timestamp") } if type_id == sp_std::any::TypeId::of::>() { - return Some("Babe"); + return Some("Babe") } None @@ -564,13 +563,13 @@ impl frame_support::traits::PalletInfo for Runtime { fn module_name() -> Option<&'static str> { let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::>() { - return Some("system"); + return Some("system") } if type_id == sp_std::any::TypeId::of::>() { - return Some("pallet_timestamp"); + return Some("pallet_timestamp") } if type_id == sp_std::any::TypeId::of::>() { - return Some("pallet_babe"); + return Some("pallet_babe") } None @@ -579,13 +578,13 @@ impl frame_support::traits::PalletInfo for Runtime { use frame_support::traits::PalletInfoAccess as _; let type_id = sp_std::any::TypeId::of::

(); if type_id == sp_std::any::TypeId::of::>() { - return Some(system::Pallet::::crate_version()); + return Some(system::Pallet::::crate_version()) } if type_id == sp_std::any::TypeId::of::>() { - return Some(pallet_timestamp::Pallet::::crate_version()); + return Some(pallet_timestamp::Pallet::::crate_version()) } if type_id == sp_std::any::TypeId::of::>() { - return Some(pallet_babe::Pallet::::crate_version()); + return Some(pallet_babe::Pallet::::crate_version()) } None @@ -699,7 +698,7 @@ fn code_using_trie() -> u64 { let mut t = TrieDBMutBuilderV1::::new(&mut mdb, &mut root).build(); for (key, value) in &pairs { if t.insert(key, value).is_err() { - return 101; + return 101 } } } diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 6e7ef2adbcf13..6e33d5c25fe6f 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -188,17 +188,17 @@ impl frame_support::traits::ExecuteBlock for BlockExecutor { /// This doesn't attempt to validate anything regarding the block. pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { if check_signature(&utx).is_err() { - return InvalidTransaction::BadProof.into(); + return InvalidTransaction::BadProof.into() } let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce < expected_nonce { - return InvalidTransaction::Stale.into(); + return InvalidTransaction::Stale.into() } if tx.nonce > expected_nonce + 64 { - return InvalidTransaction::Future.into(); + return InvalidTransaction::Future.into() } let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); @@ -261,15 +261,12 @@ fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyEx match utx { Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. 
} if extrinsic_index != 0 => - { - Err(InvalidTransaction::ExhaustsResources.into()) - }, + Err(InvalidTransaction::ExhaustsResources.into()), Extrinsic::Transfer { ref transfer, .. } => execute_transfer_backend(transfer), Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), - Extrinsic::StorageChange(key, value) => { - execute_storage_change(key, value.as_ref().map(|v| &**v)) - }, + Extrinsic::StorageChange(key, value) => + execute_storage_change(key, value.as_ref().map(|v| &**v)), Extrinsic::OffchainIndexSet(key, value) => { sp_io::offchain_index::set(key, value); Ok(Ok(())) @@ -287,7 +284,7 @@ fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce != expected_nonce { - return Err(InvalidTransaction::Stale.into()); + return Err(InvalidTransaction::Stale.into()) } // increment nonce in storage @@ -299,7 +296,7 @@ fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { // enact transfer if tx.amount > from_balance { - return Err(InvalidTransaction::Payment.into()); + return Err(InvalidTransaction::Payment.into()) } let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index 4d0e7aea1884a..f8d551a6fa5bd 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -254,14 +254,13 @@ impl sc_transaction_pool::ChainApi for TestApi { if !found_best { return ready(Ok(Err(TransactionValidityError::Invalid( InvalidTransaction::Custom(1), - )))); + )))) } }, - Ok(None) => { + Ok(None) => return ready(Ok(Err(TransactionValidityError::Invalid( InvalidTransaction::Custom(2), - )))) - }, + )))), 
Err(e) => return ready(Err(e)), } @@ -277,9 +276,7 @@ impl sc_transaction_pool::ChainApi for TestApi { }; if self.chain.read().invalid_hashes.contains(&self.hash_and_length(&uxt).0) { - return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom( - 0, - ))))); + return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0))))) } let mut validity = @@ -295,9 +292,8 @@ impl sc_transaction_pool::ChainApi for TestApi { at: &BlockId, ) -> Result>, Error> { Ok(match at { - generic::BlockId::Hash(x) => { - self.chain.read().block_by_hash.get(x).map(|b| *b.header.number()) - }, + generic::BlockId::Hash(x) => + self.chain.read().block_by_hash.get(x).map(|b| *b.header.number()), generic::BlockId::Number(num) => Some(*num), }) } @@ -308,11 +304,10 @@ impl sc_transaction_pool::ChainApi for TestApi { ) -> Result::Hash>, Error> { Ok(match at { generic::BlockId::Hash(x) => Some(*x), - generic::BlockId::Number(num) => { + generic::BlockId::Number(num) => self.chain.read().block_by_number.get(num).and_then(|blocks| { blocks.iter().find(|b| b.1.is_best()).map(|b| b.0.header().hash()) - }) - }, + }), }) } @@ -334,12 +329,10 @@ impl sc_transaction_pool::ChainApi for TestApi { at: &BlockId, ) -> Result::Header>, Self::Error> { Ok(match at { - BlockId::Number(num) => { - self.chain.read().block_by_number.get(num).map(|b| b[0].0.header().clone()) - }, - BlockId::Hash(hash) => { - self.chain.read().block_by_hash.get(hash).map(|b| b.header().clone()) - }, + BlockId::Number(num) => + self.chain.read().block_by_number.get(num).map(|b| b[0].0.header().clone()), + BlockId::Hash(hash) => + self.chain.read().block_by_hash.get(hash).map(|b| b.header().clone()), }) } diff --git a/utils/build-script-utils/src/git.rs b/utils/build-script-utils/src/git.rs index a00ad520b1464..db9a4b291ffdb 100644 --- a/utils/build-script-utils/src/git.rs +++ b/utils/build-script-utils/src/git.rs @@ -33,7 +33,7 @@ pub fn rerun_if_git_head_changed() { Err(err) => { 
eprintln!("cargo:warning=Unable to read the Git repository: {}", err); - return; + return }, Ok(None) => {}, Ok(Some(paths)) => { @@ -41,7 +41,7 @@ pub fn rerun_if_git_head_changed() { println!("cargo:rerun-if-changed={}", p.display()); } - return; + return }, } diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index eaf6983a280e5..76c28d910f943 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -144,7 +144,7 @@ where { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } @@ -155,7 +155,7 @@ where }; if children.iter().any(|elem| elem.hash == hash) { - return Err(Error::Duplicate); + return Err(Error::Duplicate) } children.push(Node { data, hash, number, children: Default::default() }); @@ -310,7 +310,7 @@ where while root_idx < self.roots.len() { if *number <= self.roots[root_idx].number { root_idx += 1; - continue; + continue } // The second element in the stack tuple tracks what is the **next** children // index to search into. If we find an ancestor then we stop searching into @@ -326,7 +326,7 @@ where is_descendent = true; if predicate(&node.data) { found = true; - break; + break } } } @@ -334,7 +334,7 @@ where // If the element we are looking for is a descendent of the current root // then we can stop the search. if is_descendent { - break; + break } root_idx += 1; } @@ -393,9 +393,9 @@ where let mut is_first = true; for child in root_children { - if is_first - && (child.number == *number && child.hash == *hash - || child.number < *number && is_descendent_of(&child.hash, hash)?) + if is_first && + (child.number == *number && child.hash == *hash || + child.number < *number && is_descendent_of(&child.hash, hash)?) 
{ root.children.push(child); // assuming that the tree is well formed only one child should pass this @@ -447,19 +447,19 @@ where { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } // check if one of the current roots is being finalized if let Some(root) = self.finalize_root(hash) { - return Ok(FinalizationResult::Changed(Some(root))); + return Ok(FinalizationResult::Changed(Some(root))) } // make sure we're not finalizing a descendent of any root for root in self.roots.iter() { if number > root.number && is_descendent_of(&root.hash, hash)? { - return Err(Error::UnfinalizedAncestor); + return Err(Error::UnfinalizedAncestor) } } @@ -501,13 +501,13 @@ where { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } // check if one of the current roots is being finalized if let Some(root) = self.finalize_root(hash) { - return Ok(FinalizationResult::Changed(Some(root))); + return Ok(FinalizationResult::Changed(Some(root))) } // we need to: @@ -522,21 +522,21 @@ where let is_finalized = root.hash == *hash; let is_descendant = !is_finalized && root.number > number && is_descendent_of(hash, &root.hash)?; - let is_ancestor = !is_finalized - && !is_descendant && root.number < number - && is_descendent_of(&root.hash, hash)?; + let is_ancestor = !is_finalized && + !is_descendant && root.number < number && + is_descendent_of(&root.hash, hash)?; (is_finalized, is_descendant, is_ancestor) }; // if we have met finalized root - open it and return if is_finalized { - return Ok(FinalizationResult::Changed(Some(self.finalize_root_at(idx)))); + return Ok(FinalizationResult::Changed(Some(self.finalize_root_at(idx)))) } // if node is descendant of finalized block - just leave it as is if is_descendant { idx += 1; - continue; + continue } // if node is 
ancestor of finalized block - remove it and continue with children @@ -544,7 +544,7 @@ where let root = self.roots.swap_remove(idx); self.roots.extend(root.children); changed = true; - continue; + continue } // if node is neither ancestor, nor descendant of the finalized block - remove it @@ -584,7 +584,7 @@ where { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } @@ -595,14 +595,14 @@ where if predicate(&node.data) && (node.hash == *hash || is_descendent_of(&node.hash, hash)?) { for child in node.children.iter() { - if child.number <= number - && (child.hash == *hash || is_descendent_of(&child.hash, hash)?) + if child.number <= number && + (child.hash == *hash || is_descendent_of(&child.hash, hash)?) { - return Err(Error::UnfinalizedAncestor); + return Err(Error::UnfinalizedAncestor) } } - return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash))); + return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash))) } } @@ -630,7 +630,7 @@ where { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } @@ -642,15 +642,15 @@ where if predicate(&root.data) && (root.hash == *hash || is_descendent_of(&root.hash, hash)?) { for child in root.children.iter() { - if child.number <= number - && (child.hash == *hash || is_descendent_of(&child.hash, hash)?) + if child.number <= number && + (child.hash == *hash || is_descendent_of(&child.hash, hash)?) { - return Err(Error::UnfinalizedAncestor); + return Err(Error::UnfinalizedAncestor) } } position = Some(i); - break; + break } } @@ -670,9 +670,9 @@ where let roots = std::mem::take(&mut self.roots); for root in roots { - let retain = root.number > number && is_descendent_of(hash, &root.hash)? 
- || root.number == number && root.hash == *hash - || is_descendent_of(&root.hash, hash)?; + let retain = root.number > number && is_descendent_of(hash, &root.hash)? || + root.number == number && root.hash == *hash || + is_descendent_of(&root.hash, hash)?; if retain { self.roots.push(root); @@ -1141,15 +1141,15 @@ mod test { // finalizing "D" will finalize a block from the tree, but it can't be applied yet // since it is not a root change. assert_eq!( - tree.finalizes_any_with_descendent_if(&"D", 10, &is_descendent_of, |c| c.effective - == 10), + tree.finalizes_any_with_descendent_if(&"D", 10, &is_descendent_of, |c| c.effective == + 10), Ok(Some(false)), ); // finalizing "E" is not allowed since there are not finalized anchestors. assert_eq!( - tree.finalizes_any_with_descendent_if(&"E", 15, &is_descendent_of, |c| c.effective - == 10), + tree.finalizes_any_with_descendent_if(&"E", 15, &is_descendent_of, |c| c.effective == + 10), Err(Error::UnfinalizedAncestor) ); @@ -1182,15 +1182,15 @@ mod test { // finalizing "F" will fail since it would finalize past "E" without finalizing "D" first assert_eq!( - tree.finalizes_any_with_descendent_if(&"F", 100, &is_descendent_of, |c| c.effective - <= 100,), + tree.finalizes_any_with_descendent_if(&"F", 100, &is_descendent_of, |c| c.effective <= + 100,), Err(Error::UnfinalizedAncestor), ); // it will work with "G" though since it is not in the same branch as "E" assert_eq!( - tree.finalizes_any_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective - <= 100), + tree.finalizes_any_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective <= + 100), Ok(Some(true)), ); diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index c4a4fd0ec6741..5a67b11f494f5 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -87,7 +87,7 @@ where /// Benchmark the execution speed of historic blocks and log the 
results. pub fn run(&self) -> Result<()> { if self.params.from == 0 { - return Err("Cannot benchmark the genesis block".into()); + return Err("Cannot benchmark the genesis block".into()) } for i in self.params.from..=self.params.to { diff --git a/utils/frame/benchmarking-cli/src/extrinsic/bench.rs b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs index 3cafdfe3aa53f..2a86c10e7ad27 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/bench.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs @@ -138,7 +138,7 @@ where let ext_builder = if let Some(ext_builder) = ext_builder { ext_builder } else { - return Ok((builder.build()?.block, None)); + return Ok((builder.build()?.block, None)) }; // Put as many extrinsics into the block as possible and count them. @@ -156,7 +156,7 @@ where num_ext += 1; } if num_ext == 0 { - return Err("A Block must hold at least one extrinsic".into()); + return Err("A Block must hold at least one extrinsic".into()) } info!("Extrinsics per block: {}", num_ext); let block = builder.build()?.block; diff --git a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs index 548d02e7bf4ec..b95cd6b5c2e42 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs @@ -105,16 +105,15 @@ impl ExtrinsicCmd { list.len(), list.join("\n") ); - return Ok(()); + return Ok(()) } let pallet = self.params.pallet.clone().unwrap_or_default(); let extrinsic = self.params.extrinsic.clone().unwrap_or_default(); let ext_builder = match ext_factory.try_get(&pallet, &extrinsic) { Some(ext_builder) => ext_builder, - None => { - return Err("Unknown pallet or extrinsic. Use --list for a complete list.".into()) - }, + None => + return Err("Unknown pallet or extrinsic. 
Use --list for a complete list.".into()), }; let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data, digest_items); diff --git a/utils/frame/benchmarking-cli/src/machine/mod.rs b/utils/frame/benchmarking-cli/src/machine/mod.rs index a87088bd0fb62..82b4e5be7358e 100644 --- a/utils/frame/benchmarking-cli/src/machine/mod.rs +++ b/utils/frame/benchmarking-cli/src/machine/mod.rs @@ -210,7 +210,7 @@ impl MachineCmd { /// Validates the CLI arguments. fn validate_args(&self) -> Result<()> { if self.tolerance > 100.0 || self.tolerance < 0.0 { - return Err("The --tolerance argument is out of range".into()); + return Err("The --tolerance argument is out of range".into()) } Ok(()) } diff --git a/utils/frame/benchmarking-cli/src/overhead/template.rs b/utils/frame/benchmarking-cli/src/overhead/template.rs index 5fb64057aa2a4..ceed34d1981f9 100644 --- a/utils/frame/benchmarking-cli/src/overhead/template.rs +++ b/utils/frame/benchmarking-cli/src/overhead/template.rs @@ -117,7 +117,7 @@ impl TemplateData { let mut path = weight_out.clone().unwrap_or_else(|| PathBuf::from(".")); if !path.is_dir() { - return Err("Need directory as --weight-path".into()); + return Err("Need directory as --weight-path".into()) } path.push(format!("{}_weights.rs", self.short_name)); Ok(path) diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index a4d40f1cfd66a..242f0e685290f 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -61,7 +61,7 @@ fn combine_batches( db_batches: Vec, ) -> Vec { if time_batches.is_empty() && db_batches.is_empty() { - return Default::default(); + return Default::default() } let mut all_benchmarks = @@ -121,36 +121,34 @@ impl PalletCmd { { if let Some(output_path) = &self.output { if !output_path.is_dir() && output_path.file_name().is_none() { - return Err("Output file or path is invalid!".into()); + return Err("Output 
file or path is invalid!".into()) } } if let Some(header_file) = &self.header { if !header_file.is_file() { - return Err("Header file is invalid!".into()); + return Err("Header file is invalid!".into()) }; } if let Some(handlebars_template_file) = &self.template { if !handlebars_template_file.is_file() { - return Err("Handlebars template file is invalid!".into()); + return Err("Handlebars template file is invalid!".into()) }; } if let Some(json_input) = &self.json_input { let raw_data = match std::fs::read(json_input) { Ok(raw_data) => raw_data, - Err(error) => { - return Err(format!("Failed to read {:?}: {}", json_input, error).into()) - }, + Err(error) => + return Err(format!("Failed to read {:?}: {}", json_input, error).into()), }; let batches: Vec = match serde_json::from_slice(&raw_data) { Ok(batches) => batches, - Err(error) => { - return Err(format!("Failed to deserialize {:?}: {}", json_input, error).into()) - }, + Err(error) => + return Err(format!("Failed to deserialize {:?}: {}", json_input, error).into()), }; - return self.output_from_results(&batches); + return self.output_from_results(&batches) } let spec = config.chain_spec; @@ -216,9 +214,9 @@ impl PalletCmd { .for_each(|item| { for benchmark in &item.benchmarks { let benchmark_name = &benchmark.name; - if extrinsic.is_empty() - || extrinsic.as_bytes() == &b"*"[..] - || extrinsics.contains(&&benchmark_name[..]) + if extrinsic.is_empty() || + extrinsic.as_bytes() == &b"*"[..] 
|| + extrinsics.contains(&&benchmark_name[..]) { benchmarks_to_run.push(( item.pallet.clone(), @@ -230,13 +228,13 @@ impl PalletCmd { }); if benchmarks_to_run.is_empty() { - return Err("No benchmarks found which match your input.".into()); + return Err("No benchmarks found which match your input.".into()) } if self.list { // List benchmarks instead of running them list_benchmark(benchmarks_to_run); - return Ok(()); + return Ok(()) } // Run the benchmarks @@ -267,7 +265,7 @@ impl PalletCmd { // The slope logic needs at least two points // to compute a slope. if self.steps < 2 { - return Err("`steps` must be at least 2.".into()); + return Err("`steps` must be at least 2.".into()) } let step_size = (diff as f32 / (self.steps - 1) as f32).max(0.0); @@ -501,7 +499,7 @@ impl PalletCmd { fs::write(path, json)?; } else { print!("{json}"); - return Ok(true); + return Ok(true) } } @@ -524,7 +522,7 @@ impl PalletCmd { // Skip raw data + analysis if there are no results if batch.time_results.is_empty() { - continue; + continue } if !self.no_storage_info { diff --git a/utils/frame/benchmarking-cli/src/pallet/writer.rs b/utils/frame/benchmarking-cli/src/pallet/writer.rs index 76133f009cc19..a52bbcd229cb1 100644 --- a/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -126,7 +126,7 @@ fn map_results( ) -> Result>, std::io::Error> { // Skip if batches is empty. 
if batches.is_empty() { - return Err(io_error("empty batches")); + return Err(io_error("empty batches")) } let mut all_benchmarks = HashMap::<_, Vec>::new(); @@ -134,7 +134,7 @@ fn map_results( for batch in batches { // Skip if there are no results if batch.time_results.is_empty() { - continue; + continue } let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap(); @@ -405,13 +405,13 @@ pub(crate) fn add_storage_comments( for (key, reads, writes, whitelisted) in &result.keys { // skip keys which are whitelisted if *whitelisted { - continue; + continue } let prefix_length = key.len().min(32); let prefix = key[0..prefix_length].to_vec(); if identified.contains(&prefix) { // skip adding comments for keys we already identified - continue; + continue } else { // track newly identified keys identified.insert(prefix.clone()); @@ -515,7 +515,7 @@ mod test { benchmark: [benchmark.to_vec(), b"_benchmark".to_vec()].concat(), time_results: results.clone(), db_results: results, - }; + } } fn check_data(benchmark: &BenchmarkData, component: &str, base: u128, slope: u128) { diff --git a/utils/frame/benchmarking-cli/src/shared/stats.rs b/utils/frame/benchmarking-cli/src/shared/stats.rs index e9e436d2a7e21..ffae4a17724f8 100644 --- a/utils/frame/benchmarking-cli/src/shared/stats.rs +++ b/utils/frame/benchmarking-cli/src/shared/stats.rs @@ -71,7 +71,7 @@ impl Stats { /// Calculates statistics and returns them. 
pub fn new(xs: &Vec) -> Result { if xs.is_empty() { - return Err("Empty input is invalid".into()); + return Err("Empty input is invalid".into()) } let (avg, stddev) = Self::avg_and_stddev(xs); diff --git a/utils/frame/benchmarking-cli/src/shared/weight_params.rs b/utils/frame/benchmarking-cli/src/shared/weight_params.rs index d1e9dc959ddd3..030bbfa00d468 100644 --- a/utils/frame/benchmarking-cli/src/shared/weight_params.rs +++ b/utils/frame/benchmarking-cli/src/shared/weight_params.rs @@ -57,7 +57,7 @@ pub struct WeightParams { impl WeightParams { pub fn calc_weight(&self, stat: &Stats) -> Result { if self.weight_mul.is_sign_negative() || !self.weight_mul.is_normal() { - return Err("invalid floating number for `weight_mul`".into()); + return Err("invalid floating number for `weight_mul`".into()) } let s = stat.select(self.weight_metric) as f64; let w = s.mul_add(self.weight_mul, self.weight_add as f64).ceil(); diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index 005a682ba733e..ce2d52e57d641 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -178,7 +178,7 @@ impl StorageCmd { if let Some((ChildType::ParentKeyId, storage_key)) = ChildType::from_prefixed_key(&PrefixedStorageKey::new(key)) { - return Some(ChildInfo::new_default(storage_key)); + return Some(ChildInfo::new_default(storage_key)) } None } diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs index 3b40e6662abca..55a7b60d55552 100644 --- a/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/utils/frame/benchmarking-cli/src/storage/write.rs @@ -99,7 +99,7 @@ impl StorageCmd { state_col, None, ) { - break; + break } } @@ -139,7 +139,7 @@ impl StorageCmd { state_col, Some(&info), ) { - break; + break } } @@ -239,7 +239,7 @@ fn check_new_value( db.sanitize_key(&mut k); if db.get(col, &k).is_some() { 
trace!("Benchmark-store key creation: Key collision detected, retry"); - return false; + return false } } } diff --git a/utils/frame/frame-utilities-cli/src/pallet_id.rs b/utils/frame/frame-utilities-cli/src/pallet_id.rs index e625f4cfc779d..2a80e3a3d312d 100644 --- a/utils/frame/frame-utilities-cli/src/pallet_id.rs +++ b/utils/frame/frame-utilities-cli/src/pallet_id.rs @@ -63,7 +63,7 @@ impl PalletIdCmd { R::AccountId: Ss58Codec, { if self.id.len() != 8 { - return Err("a module id must be a string of 8 characters".into()); + return Err("a module id must be a string of 8 characters".into()) } let password = self.keystore_params.read_password()?; diff --git a/utils/frame/generate-bags/src/lib.rs b/utils/frame/generate-bags/src/lib.rs index c3fef7307e898..23da131a668d8 100644 --- a/utils/frame/generate-bags/src/lib.rs +++ b/utils/frame/generate-bags/src/lib.rs @@ -94,7 +94,7 @@ fn path_to_header_file() -> Option { for file_name in &["HEADER-APACHE2", "HEADER-GPL3", "HEADER", "file_header.txt"] { let path = workdir.join(file_name); if path.exists() { - return Some(path); + return Some(path) } } None @@ -146,7 +146,7 @@ pub fn thresholds( thresholds.push(successor as VoteWeight); } else { eprintln!("unexpectedly exceeded weight limit; breaking threshold generation loop"); - break; + break } } diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 4e6765b73b7f8..86cfc767bf3b5 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -303,7 +303,7 @@ where if page_len < PAGE as usize { log::debug!(target: LOG_TARGET, "last page received: {}", page_len); - break all_keys; + break all_keys } else { let new_last_key = all_keys.last().expect("all_keys is populated; has .last(); qed"); @@ -578,7 +578,7 @@ where Some((ChildType::ParentKeyId, storage_key)) => storage_key, None => { log::error!(target: LOG_TARGET, "invalid key: {:?}", prefixed_top_key); - return Err("Invalid 
child key"); + return Err("Invalid child key") }, }; @@ -816,7 +816,7 @@ where for (k, v) in top_kv { // skip writing the child root data. if is_default_child_storage_key(k.as_ref()) { - continue; + continue } ext.insert(k.0, v.0); } @@ -956,9 +956,9 @@ mod remote_tests { .into_iter() .map(|d| d.unwrap()) .filter(|p| { - p.path().file_name().unwrap_or_default() == "offline_else_online_works_data" - || p.path().extension().unwrap_or_default() == "top" - || p.path().extension().unwrap_or_default() == "child" + p.path().file_name().unwrap_or_default() == "offline_else_online_works_data" || + p.path().extension().unwrap_or_default() == "top" || + p.path().extension().unwrap_or_default() == "child" }) .collect::>(); assert!(to_delete.len() > 0); @@ -1053,9 +1053,9 @@ mod remote_tests { .into_iter() .map(|d| d.unwrap()) .filter(|p| { - p.path().file_name().unwrap_or_default() == "can_create_top_snapshot_data" - || p.path().extension().unwrap_or_default() == "top" - || p.path().extension().unwrap_or_default() == "child" + p.path().file_name().unwrap_or_default() == "can_create_top_snapshot_data" || + p.path().extension().unwrap_or_default() == "top" || + p.path().extension().unwrap_or_default() == "child" }) .collect::>(); @@ -1094,9 +1094,9 @@ mod remote_tests { .into_iter() .map(|d| d.unwrap()) .filter(|p| { - p.path().file_name().unwrap_or_default() == "can_create_child_snapshot_data" - || p.path().extension().unwrap_or_default() == "top" - || p.path().extension().unwrap_or_default() == "child" + p.path().file_name().unwrap_or_default() == "can_create_child_snapshot_data" || + p.path().extension().unwrap_or_default() == "top" || + p.path().extension().unwrap_or_default() == "child" }) .collect::>(); @@ -1132,9 +1132,9 @@ mod remote_tests { .into_iter() .map(|d| d.unwrap()) .filter(|p| { - p.path().file_name().unwrap_or_default() == "can_fetch_all_data" - || p.path().extension().unwrap_or_default() == "top" - || p.path().extension().unwrap_or_default() == "child" + 
p.path().file_name().unwrap_or_default() == "can_fetch_all_data" || + p.path().extension().unwrap_or_default() == "top" || + p.path().extension().unwrap_or_default() == "child" }) .collect::>(); diff --git a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index 74f68e9238d5a..ab180c7d45d5b 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -52,15 +52,14 @@ fn count_migrate<'a, H: Hasher>( for node in iter_node { let node = node.map_err(|e| format!("TrieDB node iterator error: {}", e))?; match node.2.node_plan() { - NodePlan::Leaf { value, .. } | NodePlan::NibbledBranch { value: Some(value), .. } => { + NodePlan::Leaf { value, .. } | NodePlan::NibbledBranch { value: Some(value), .. } => if let ValuePlan::Inline(range) = value { - if (range.end - range.start) as u32 - >= sp_core::storage::TRIE_VALUE_NODE_THRESHOLD + if (range.end - range.start) as u32 >= + sp_core::storage::TRIE_VALUE_NODE_THRESHOLD { nb += 1; } - } - }, + }, _ => (), } } diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index b0e5ffe872702..f54354342bf28 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -132,7 +132,7 @@ //! added, given the right flag: //! //! ```ignore -//! +//! //! #[cfg(feature = try-runtime)] //! fn pre_upgrade() -> Result, &'static str> {} //! 
@@ -504,11 +504,10 @@ impl State { ::Err: Debug, { Ok(match self { - State::Snap { snapshot_path } => { + State::Snap { snapshot_path } => Builder::::new().mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path), - })) - }, + })), State::Live { snapshot_path, pallet, uri, at, child_tree } => { let at = match at { Some(at_str) => Some(hash_of::(at_str)?), @@ -554,38 +553,34 @@ impl TryRuntimeCmd { ExecDispatch: NativeExecutionDispatch + 'static, { match &self.command { - Command::OnRuntimeUpgrade(ref cmd) => { + Command::OnRuntimeUpgrade(ref cmd) => commands::on_runtime_upgrade::on_runtime_upgrade::( self.shared.clone(), cmd.clone(), config, ) - .await - }, - Command::OffchainWorker(cmd) => { + .await, + Command::OffchainWorker(cmd) => commands::offchain_worker::offchain_worker::( self.shared.clone(), cmd.clone(), config, ) - .await - }, - Command::ExecuteBlock(cmd) => { + .await, + Command::ExecuteBlock(cmd) => commands::execute_block::execute_block::( self.shared.clone(), cmd.clone(), config, ) - .await - }, - Command::FollowChain(cmd) => { + .await, + Command::FollowChain(cmd) => commands::follow_chain::follow_chain::( self.shared.clone(), cmd.clone(), config, ) - .await - }, + .await, } } } diff --git a/utils/wasm-builder/src/builder.rs b/utils/wasm-builder/src/builder.rs index 579b3792d49b5..81a8693968188 100644 --- a/utils/wasm-builder/src/builder.rs +++ b/utils/wasm-builder/src/builder.rs @@ -156,7 +156,7 @@ impl WasmBuilder { provide_dummy_wasm_binary_if_not_exist(&file_path); - return; + return } build_project( @@ -186,8 +186,8 @@ fn generate_crate_skip_build_env_name() -> String { /// Checks if the build of the WASM binary should be skipped. 
fn check_skip_build() -> bool { - env::var(crate::SKIP_BUILD_ENV).is_ok() - || env::var(generate_crate_skip_build_env_name()).is_ok() + env::var(crate::SKIP_BUILD_ENV).is_ok() || + env::var(generate_crate_skip_build_env_name()).is_ok() } /// Provide a dummy WASM binary if there doesn't exist one. diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs index adcf45417225b..fc86a06170a50 100644 --- a/utils/wasm-builder/src/lib.rs +++ b/utils/wasm-builder/src/lib.rs @@ -254,9 +254,8 @@ impl CargoCommand { // variable is set, we can assume that whatever rust compiler we have, it is a nightly // compiler. For "more" information, see: // https://github.com/rust-lang/rust/blob/fa0f7d0080d8e7e9eb20aa9cbf8013f96c81287f/src/libsyntax/feature_gate/check.rs#L891 - env::var("RUSTC_BOOTSTRAP").is_ok() - || self - .command() + env::var("RUSTC_BOOTSTRAP").is_ok() || + self.command() .arg("--version") .output() .map_err(|_| ()) diff --git a/utils/wasm-builder/src/prerequisites.rs b/utils/wasm-builder/src/prerequisites.rs index 9c37d8279b957..fb04dc3c98fb2 100644 --- a/utils/wasm-builder/src/prerequisites.rs +++ b/utils/wasm-builder/src/prerequisites.rs @@ -38,7 +38,7 @@ pub(crate) fn check() -> Result { let cargo_command = crate::get_nightly_cargo(); if !cargo_command.is_nightly() { - return Err(print_error_message("Rust nightly not installed, please install it!")); + return Err(print_error_message("Rust nightly not installed, please install it!")) } check_wasm_toolchain_installed(cargo_command) @@ -160,9 +160,8 @@ fn check_wasm_toolchain_installed( )) } else { match String::from_utf8(s.stderr) { - Ok(ref err) if err.contains("linker `rust-lld` not found") => { - Err(print_error_message("`rust-lld` not found, please install it!")) - }, + Ok(ref err) if err.contains("linker `rust-lld` not found") => + Err(print_error_message("`rust-lld` not found, please install it!")), Ok(ref err) => Err(format!( "{}\n\n{}\n{}\n{}{}\n", err_msg, diff --git 
a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index c0ea5a2641c21..07219676413fc 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -194,11 +194,11 @@ fn find_cargo_lock(cargo_manifest: &Path) -> Option { fn find_impl(mut path: PathBuf) -> Option { loop { if path.join("Cargo.lock").exists() { - return Some(path.join("Cargo.lock")); + return Some(path.join("Cargo.lock")) } if !path.pop() { - return None; + return None } } } @@ -207,7 +207,7 @@ fn find_cargo_lock(cargo_manifest: &Path) -> Option { let path = PathBuf::from(workspace); if path.join("Cargo.lock").exists() { - return Some(path.join("Cargo.lock")); + return Some(path.join("Cargo.lock")) } else { build_helper::warning!( "`{}` env variable doesn't point to a directory that contains a `Cargo.lock`.", @@ -217,7 +217,7 @@ fn find_cargo_lock(cargo_manifest: &Path) -> Option { } if let Some(path) = find_impl(build_helper::out_dir()) { - return Some(path); + return Some(path) } build_helper::warning!( @@ -262,11 +262,10 @@ fn get_wasm_workspace_root() -> PathBuf { loop { match out_dir.parent() { Some(parent) if out_dir.ends_with("build") => return parent.to_path_buf(), - _ => { + _ => if !out_dir.pop() { - break; - } - }, + break + }, } } @@ -405,20 +404,19 @@ fn project_enabled_features( // this heuristic anymore. However, for the transition phase between now and namespaced // features already being present in nightly, we need this code to make // runtimes compile with all the possible rustc versions. 
- if v.len() == 1 - && v.get(0).map_or(false, |v| *v == format!("dep:{}", f)) - && std_enabled.as_ref().map(|e| e.iter().any(|ef| ef == *f)).unwrap_or(false) + if v.len() == 1 && + v.get(0).map_or(false, |v| *v == format!("dep:{}", f)) && + std_enabled.as_ref().map(|e| e.iter().any(|ef| ef == *f)).unwrap_or(false) { - return false; + return false } // We don't want to enable the `std`/`default` feature for the wasm build and // we need to check if the feature is enabled by checking the env variable. - *f != "std" - && *f != "default" - && env::var(format!("CARGO_FEATURE_{}", feature_env)) - .map(|v| v == "1") - .unwrap_or_default() + *f != "std" && + *f != "default" && env::var(format!("CARGO_FEATURE_{}", feature_env)) + .map(|v| v == "1") + .unwrap_or_default() }) .map(|d| d.0.clone()) .collect::>(); From 95493ad6f8eee1415197186ea5a82f63e5896e5d Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Thu, 15 Dec 2022 14:47:23 +0100 Subject: [PATCH 044/101] Remove benchmarking, unused clone() --- .../nft-fractionalisation/src/benchmarking.rs | 20 ------------------- frame/nft-fractionalisation/src/lib.rs | 14 ++++++++----- 2 files changed, 9 insertions(+), 25 deletions(-) delete mode 100644 frame/nft-fractionalisation/src/benchmarking.rs diff --git a/frame/nft-fractionalisation/src/benchmarking.rs b/frame/nft-fractionalisation/src/benchmarking.rs deleted file mode 100644 index d496a9fc89b1a..0000000000000 --- a/frame/nft-fractionalisation/src/benchmarking.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! Benchmarking setup for pallet-template - -use super::*; - -#[allow(unused)] -use crate::Pallet as Template; -use frame_benchmarking::{benchmarks, whitelisted_caller}; -use frame_system::RawOrigin; - -benchmarks! { - do_something { - let s in 0 .. 
100; - let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), s) - verify { - assert_eq!(Something::::get(), Some(s)); - } - - impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test); -} diff --git a/frame/nft-fractionalisation/src/lib.rs b/frame/nft-fractionalisation/src/lib.rs index ad5fdb255522f..2589d12410eba 100644 --- a/frame/nft-fractionalisation/src/lib.rs +++ b/frame/nft-fractionalisation/src/lib.rs @@ -132,11 +132,13 @@ pub mod pallet { #[pallet::error] pub enum Error { - AssetAlreadyRegistered, + AssetDataNotFound, + NFTDataNotFound } #[pallet::call] impl Pallet { + // TODO: correct weights #[pallet::weight(10_000 + T::DbWeight::get().writes(2).ref_time())] /// Pallet's account must be funded before lock is possible! /// 5EYCAe5gjC5dxKPbV2GPQUetETjFNSYZsSwSurVTTXidSLbh @@ -149,7 +151,7 @@ pub mod pallet { min_balance: AssetBalanceOf, amount: AssetBalanceOf, ) -> DispatchResult { - let _who = ensure_signed(origin.clone())?; + let _who = ensure_signed(origin)?; let admin_account_id = Self::pallet_account_id(); Self::do_lock_nft(collection_id, item_id)?; @@ -184,14 +186,16 @@ pub mod pallet { /// Return and burn a % of the asset, unlock the NFT. Currently 100% is the minimum /// threshold. + // TODO: correct weights #[pallet::weight(10_000 + T::DbWeight::get().writes(2).ref_time())] pub fn burn_asset_unlock_nft( origin: OriginFor, asset_id: AssetIdOf, amount: AssetBalanceOf, ) -> DispatchResult { - let who = ensure_signed(origin.clone())?; + let who = ensure_signed(origin)?; + ensure!(>::contains_key(asset_id), Error::::NFTDataNotFound); let (collection_id, item_id) = Self::get_nft_id(asset_id); Self::do_burn_asset(asset_id, &who, amount)?; @@ -243,7 +247,7 @@ pub mod pallet { /// Assert that the `asset_id` was created by means of locking an NFT and fetch /// its `CollectionId` and `ItemId`. 
fn get_nft_id(asset_id: AssetIdOf) -> (T::CollectionId, T::ItemId) { - assert_eq!(>::contains_key(asset_id), true); + // Check for explicit existence of the value in the extrinsic. >::get(asset_id).unwrap() } @@ -273,7 +277,7 @@ pub mod pallet { amount: AssetBalanceOf, ) -> Result, DispatchError> { // Assert that the asset exists in storage. - assert_eq!(>::contains_key(asset_id), true); + ensure!(>::contains_key(asset_id), Error::::NFTDataNotFound); Self::check_token_amount(asset_id, amount); T::Assets::burn_from(asset_id, account, amount) } From f07e7d08d9bc6d5895ec4ec4441c82aff89cd3bf Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:03:11 +0200 Subject: [PATCH 045/101] Update frame/support/src/traits/tokens/nonfungible_v2.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/support/src/traits/tokens/nonfungible_v2.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs index ab0e72b3c8286..f939d9a6f3833 100644 --- a/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -15,13 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Traits for dealing with a single non-fungible collection of items. +//! Traits for dealing with a single non-fungible item. //! -//! This assumes a single level namespace identified by `Inspect::ItemId`, and could -//! reasonably be implemented by pallets which wants to expose a single collection of NFT-like +//! This assumes a single-level namespace identified by `Inspect::ItemId`, and could +//! reasonably be implemented by pallets that want to expose a single collection of NFT-like //! objects. //! -//! 
For an NFT API which has dual-level namespacing, the traits in `nonfungibles` are better to +//! For an NFT API that has dual-level namespacing, the traits in `nonfungibles` are better to //! use. use super::nonfungibles_v2 as nonfungibles; From b1fe6f763b61caaac1ab16edde7cfa408edbfbfc Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:15:22 +0200 Subject: [PATCH 046/101] Update frame/support/src/traits/tokens/nonfungible_v2.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/support/src/traits/tokens/nonfungible_v2.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs index f939d9a6f3833..d843d18b0fcca 100644 --- a/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -33,7 +33,7 @@ use codec::{Decode, Encode}; use sp_runtime::TokenError; use sp_std::prelude::*; -/// Trait for providing an interface to a read-only NFT-like set of items. +/// Trait for providing an interface to a read-only NFT-like item. pub trait Inspect { /// Type for identifying an item. 
type ItemId; From 917bb1e544df09b741c61e7a2431bf390b858211 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:19:31 +0200 Subject: [PATCH 047/101] Update frame/support/src/traits/tokens/nonfungible_v2.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/support/src/traits/tokens/nonfungible_v2.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs index d843d18b0fcca..cc8dafcbd332f 100644 --- a/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -135,7 +135,7 @@ pub trait Transfer: Inspect { fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult; } -/// Convert a `fungibles` trait implementation into a `fungible` trait implementation by identifying +/// Convert a `nonfungibles` trait implementation into a `nonfungible` trait implementation by identifying /// a single item. pub struct ItemOf< F: nonfungibles::Inspect, From 92f43f6da01efb471679850012e9088a80c78fbe Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:21:10 +0200 Subject: [PATCH 048/101] Update frame/support/src/traits/tokens/nonfungibles_v2.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/support/src/traits/tokens/nonfungibles_v2.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs index 09b4793832d7e..14b051df7980a 100644 --- a/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -157,7 +157,7 @@ pub trait Destroy: Inspect { /// * `collection`: The `CollectionId` to be destroyed. 
/// * `witness`: Any witness data that needs to be provided to complete the operation /// successfully. - /// * `maybe_check_owner`: An optional account id that can be used to authorize the destroy + /// * `maybe_check_owner`: An optional `AccountId` that can be used to authorize the destroy /// command. If not provided, we will not do any authorization checks before destroying the /// item. /// From 30985daa3be96d07cecb9fa568b8be12c191225d Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:29:00 +0200 Subject: [PATCH 049/101] Update frame/nfts/src/lib.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 6267df71a9a20..81b8987265c89 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -1533,7 +1533,7 @@ pub mod pallet { /// the `collection`. /// /// - `collection`: The identifier of the collection to change. - /// - `max_supply`: The maximum amount of items a collection could have. + /// - `max_supply`: The maximum number of items a collection could have. /// /// Emits `CollectionMaxSupplySet` event when successful. 
#[pallet::weight(T::WeightInfo::set_collection_max_supply())] From 0d1a442e914be23fbea273ff4ed95494cba7d732 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:29:20 +0200 Subject: [PATCH 050/101] Update frame/nfts/src/lib.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 81b8987265c89..f9d378194cd1d 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -1527,7 +1527,7 @@ pub mod pallet { Self::do_set_accept_ownership(who, maybe_collection) } - /// Set the maximum amount of items a collection could have. + /// Set the maximum number of items a collection could have. /// /// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of /// the `collection`. From d429d6e920294694a4a7bc1ebf7818004359da1d Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:39:39 +0200 Subject: [PATCH 051/101] Update frame/nfts/src/lib.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index f9d378194cd1d..db3bc52786274 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -929,7 +929,7 @@ pub mod pallet { /// whose existing deposit is less than the refreshed deposit as it would only cost them, /// so it's of little consequence. /// - /// It will still return an error in the case that the collection is unknown of the signer + /// It will still return an error in the case that the collection is unknown or the signer /// is not permitted to call it. 
/// /// Weight: `O(items.len())` From 5de99c8460a5772c3d9d963f8893ee50f3a658d9 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:40:06 +0200 Subject: [PATCH 052/101] Update frame/nfts/src/lib.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index db3bc52786274..108060c23b989 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -455,7 +455,7 @@ pub mod pallet { CollectionMaxSupplySet { collection: T::CollectionId, max_supply: u32 }, /// Mint settings for a collection had changed. CollectionMintSettingsUpdated { collection: T::CollectionId }, - /// Event gets emmited when the `NextCollectionId` gets incremented. + /// Event gets emitted when the `NextCollectionId` gets incremented. NextCollectionIdIncremented { next_id: T::CollectionId }, /// The price was set for the instance. ItemPriceSet { From dcbce57cc33d5da42248fb4c4b4f58918ec67e2c Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:41:20 +0200 Subject: [PATCH 053/101] Update frame/nfts/src/lib.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 108060c23b989..e95b3bea65d00 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -420,7 +420,7 @@ pub mod pallet { }, /// Metadata has been cleared for an item. MetadataCleared { collection: T::CollectionId, item: T::ItemId }, - /// Metadata has been cleared for an item. + /// The deposit for a set of `item`s within a `collection` has been updated. 
Redeposited { collection: T::CollectionId, successful_items: Vec }, /// New attribute metadata has been set for a `collection` or `item`. AttributeSet { From 1db5061013ba4207d99492c117008d93053d61fc Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:44:50 +0200 Subject: [PATCH 054/101] Update frame/support/src/traits/tokens/nonfungible_v2.rs --- frame/support/src/traits/tokens/nonfungible_v2.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs index cc8dafcbd332f..187127098db16 100644 --- a/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -129,7 +129,7 @@ pub trait Mutate: Inspect { } } -/// Trait for providing a non-fungible set of items which can only be transferred. +/// Trait for transferring a non-fungible item. pub trait Transfer: Inspect { /// Transfer `item` into `destination` account. fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult; From 7653b99b2b3031abe741da778114124e770d3b78 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:50:27 +0200 Subject: [PATCH 055/101] Update frame/nfts/src/lib.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index e95b3bea65d00..a4b425a78b590 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -457,14 +457,14 @@ pub mod pallet { CollectionMintSettingsUpdated { collection: T::CollectionId }, /// Event gets emitted when the `NextCollectionId` gets incremented. NextCollectionIdIncremented { next_id: T::CollectionId }, - /// The price was set for the instance. + /// The price was set for the item. 
ItemPriceSet { collection: T::CollectionId, item: T::ItemId, price: ItemPrice, whitelisted_buyer: Option, }, - /// The price for the instance was removed. + /// The price for the item was removed. ItemPriceRemoved { collection: T::CollectionId, item: T::ItemId }, /// An item was bought. ItemBought { From 46f645160b774b4c36053f6a142c8fdf633989ee Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:51:15 +0200 Subject: [PATCH 056/101] Update frame/support/src/traits/tokens/nonfungibles_v2.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/support/src/traits/tokens/nonfungibles_v2.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs index 14b051df7980a..d2f5f5529fa96 100644 --- a/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -246,7 +246,7 @@ pub trait Mutate: Inspect { } } -/// Trait for providing a non-fungible sets of items which can only be transferred. +/// Trait for transferring non-fungible sets of items. pub trait Transfer: Inspect { /// Transfer `item` of `collection` into `destination` account. fn transfer( From 91ffabbff12fc72837544420b4539030d81fcd86 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:51:40 +0200 Subject: [PATCH 057/101] Update frame/nfts/src/lib.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index a4b425a78b590..8ef9b0b14fe48 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -553,7 +553,7 @@ pub mod pallet { MaxSupplyReached, /// The max supply is locked and can't be changed. 
MaxSupplyLocked, - /// The provided max supply is less to the amount of items a collection already has. + /// The provided max supply is less than the number of items a collection already has. MaxSupplyTooSmall, /// The given item ID is unknown. UnknownItem, From 8057d72ac27fe25e19d3ecfb62215d39fe074aba Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:51:59 +0200 Subject: [PATCH 058/101] Update frame/nfts/src/lib.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 8ef9b0b14fe48..c033c4373f649 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -567,7 +567,7 @@ pub mod pallet { ReachedApprovalLimit, /// The deadline has already expired. DeadlineExpired, - /// The duration provided should be less or equal to MaxDeadlineDuration. + /// The duration provided should be less than or equal to `MaxDeadlineDuration`. WrongDuration, /// The method is disabled by system settings. MethodDisabled, From d0823006b297ea9537224a40457e43b9a527e0e1 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:52:44 +0200 Subject: [PATCH 059/101] Update frame/nfts/src/lib.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/lib.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index c033c4373f649..629f5baa97158 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -643,9 +643,8 @@ pub mod pallet { /// Unlike `create`, no funds are reserved. /// /// - `owner`: The owner of this collection of items. 
The owner has full superuser - /// permissions - /// over this item, but may later change and configure the permissions using - /// `transfer_ownership` and `set_team`. + /// permissions over this item, but may later change and configure the permissions using + /// `transfer_ownership` and `set_team`. /// /// Emits `ForceCreated` event when successful. /// From 8f18f088cfc1adf2fe5f92ff87c9a502d38af203 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:57:07 +0200 Subject: [PATCH 060/101] Update frame/nfts/src/lib.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 629f5baa97158..4555997f5b904 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -571,7 +571,7 @@ pub mod pallet { WrongDuration, /// The method is disabled by system settings. MethodDisabled, - /// The provided is setting can't be set. + /// The provided setting can't be set. WrongSetting, /// Item's config already exists and should be equal to the provided one. InconsistentItemConfig, From 27e2f38d534abec384bc19eaf738e6cd9aa3afb2 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Tue, 20 Dec 2022 18:04:39 +0200 Subject: [PATCH 061/101] Address comments --- frame/nfts/src/benchmarking.rs | 6 +++--- frame/nfts/src/features/approvals.rs | 2 +- frame/nfts/src/features/metadata.rs | 4 ++-- frame/nfts/src/lib.rs | 18 +++++++++--------- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index 4e392b147c4e8..524b3662fe5b2 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -469,7 +469,7 @@ benchmarks_instance_pallet! { let (item, ..) 
= mint_item::(0); }: _(SystemOrigin::Signed(caller), collection, item, data.clone()) verify { - assert_last_event::(Event::MetadataSet { collection, item, data }.into()); + assert_last_event::(Event::ItemMetadataSet { collection, item, data }.into()); } clear_metadata { @@ -478,7 +478,7 @@ benchmarks_instance_pallet! { add_item_metadata::(item); }: _(SystemOrigin::Signed(caller), collection, item) verify { - assert_last_event::(Event::MetadataCleared { collection, item }.into()); + assert_last_event::(Event::ItemMetadataCleared { collection, item }.into()); } set_collection_metadata { @@ -506,7 +506,7 @@ benchmarks_instance_pallet! { let deadline = T::BlockNumber::max_value(); }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup, Some(deadline)) verify { - assert_last_event::(Event::ApprovedTransfer { collection, item, owner: caller, delegate, deadline: Some(deadline) }.into()); + assert_last_event::(Event::TransferApproved { collection, item, owner: caller, delegate, deadline: Some(deadline) }.into()); } cancel_approval { diff --git a/frame/nfts/src/features/approvals.rs b/frame/nfts/src/features/approvals.rs index 0cbceb9113d0c..cb5279fd949db 100644 --- a/frame/nfts/src/features/approvals.rs +++ b/frame/nfts/src/features/approvals.rs @@ -54,7 +54,7 @@ impl, I: 'static> Pallet { .map_err(|_| Error::::ReachedApprovalLimit)?; Item::::insert(&collection, &item, &details); - Self::deposit_event(Event::ApprovedTransfer { + Self::deposit_event(Event::TransferApproved { collection, item, owner: details.owner, diff --git a/frame/nfts/src/features/metadata.rs b/frame/nfts/src/features/metadata.rs index 3a12dbe64f2f4..942f377141a33 100644 --- a/frame/nfts/src/features/metadata.rs +++ b/frame/nfts/src/features/metadata.rs @@ -65,7 +65,7 @@ impl, I: 'static> Pallet { *metadata = Some(ItemMetadata { deposit, data: data.clone() }); Collection::::insert(&collection, &collection_details); - Self::deposit_event(Event::MetadataSet { collection, item, data 
}); + Self::deposit_event(Event::ItemMetadataSet { collection, item, data }); Ok(()) }) } @@ -96,7 +96,7 @@ impl, I: 'static> Pallet { collection_details.owner_deposit.saturating_reduce(deposit); Collection::::insert(&collection, &collection_details); - Self::deposit_event(Event::MetadataCleared { collection, item }); + Self::deposit_event(Event::ItemMetadataCleared { collection, item }); Ok(()) }) } diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 4555997f5b904..77353ce3359d2 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Unique (Items) Module +//! # Nfts Module //! //! A simple, secure module for dealing with non-fungible items. //! @@ -389,7 +389,7 @@ pub mod pallet { }, /// An `item` of a `collection` has been approved by the `owner` for transfer by /// a `delegate`. - ApprovedTransfer { + TransferApproved { collection: T::CollectionId, item: T::ItemId, owner: T::AccountId, @@ -413,13 +413,13 @@ pub mod pallet { /// Metadata has been cleared for a `collection`. CollectionMetadataCleared { collection: T::CollectionId }, /// New metadata has been set for an item. - MetadataSet { + ItemMetadataSet { collection: T::CollectionId, item: T::ItemId, data: BoundedVec, }, /// Metadata has been cleared for an item. - MetadataCleared { collection: T::CollectionId, item: T::ItemId }, + ItemMetadataCleared { collection: T::CollectionId, item: T::ItemId }, /// The deposit for a set of `item`s within a `collection` has been updated. Redeposited { collection: T::CollectionId, successful_items: Vec }, /// New attribute metadata has been set for a `collection` or `item`. @@ -1146,7 +1146,7 @@ pub mod pallet { /// - `maybe_deadline`: Optional deadline for the approval. Specified by providing the /// number of blocks after which the approval will expire /// - /// Emits `ApprovedTransfer` on success. 
+ /// Emits `TransferApproved` on success. /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::approve_transfer())] @@ -1416,7 +1416,7 @@ pub mod pallet { /// - `item`: The identifier of the item whose metadata to set. /// - `data`: The general information of this item. Limited in length by `StringLimit`. /// - /// Emits `MetadataSet`. + /// Emits `ItemMetadataSet`. /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::set_metadata())] @@ -1442,7 +1442,7 @@ pub mod pallet { /// - `collection`: The identifier of the collection whose item's metadata to clear. /// - `item`: The identifier of the item whose metadata to clear. /// - /// Emits `MetadataCleared`. + /// Emits `ItemMetadataCleared`. /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::clear_metadata())] @@ -1644,8 +1644,8 @@ pub mod pallet { /// - `desired_collection`: The collection of the desired item. /// - `desired_item`: The desired item an owner wants to receive. /// - `maybe_price`: The price an owner is willing to pay or receive for the desired `item`. - /// - `maybe_duration`: Optional deadline for the swap. Specified by providing the - /// number of blocks after which the swap will expire. + /// - `duration`: A deadline for the swap. Specified by providing the number of blocks + /// after which the swap will expire. /// /// Emits `SwapCreated` on success. 
#[pallet::weight(T::WeightInfo::create_swap())] From 56f228d97b6ae571000a6dae23d2c23fc453bd24 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Thu, 22 Dec 2022 14:22:16 +0200 Subject: [PATCH 062/101] [NFTs] Add the new `owner` param to mint() method (#12997) * Add the new `owner` param to mint() method * Fmt * Address comments --- frame/nfts/src/benchmarking.rs | 19 +-- frame/nfts/src/features/create_delete_item.rs | 22 +-- frame/nfts/src/impl_nonfungibles.rs | 1 + frame/nfts/src/lib.rs | 30 ++-- frame/nfts/src/tests.rs | 153 +++++++++++++----- .../src/traits/tokens/nonfungible_v2.rs | 4 +- 6 files changed, 153 insertions(+), 76 deletions(-) diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index 524b3662fe5b2..6517445da672d 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -79,6 +79,7 @@ fn mint_item, I: 'static>( SystemOrigin::Signed(caller.clone()).into(), T::Helper::collection(0), item, + caller_lookup.clone(), None, )); (item, caller, caller_lookup) @@ -174,7 +175,7 @@ benchmarks_instance_pallet! { let m in 0 .. 1_000; let a in 0 .. 1_000; - let (collection, caller, caller_lookup) = create_collection::(); + let (collection, caller, _) = create_collection::(); add_collection_metadata::(); for i in 0..n { mint_item::(i as u16); @@ -200,7 +201,7 @@ benchmarks_instance_pallet! { mint { let (collection, caller, caller_lookup) = create_collection::(); let item = T::Helper::item(0); - }: _(SystemOrigin::Signed(caller.clone()), collection, item, None) + }: _(SystemOrigin::Signed(caller.clone()), collection, item, caller_lookup, None) verify { assert_last_event::(Event::Issued { collection, item, owner: caller }.into()); } @@ -222,7 +223,7 @@ benchmarks_instance_pallet! { } transfer { - let (collection, caller, caller_lookup) = create_collection::(); + let (collection, caller, _) = create_collection::(); let (item, ..) 
= mint_item::(0); let target: T::AccountId = account("target", 0, SEED); @@ -235,7 +236,7 @@ benchmarks_instance_pallet! { redeposit { let i in 0 .. 5_000; - let (collection, caller, caller_lookup) = create_collection::(); + let (collection, caller, _) = create_collection::(); let items = (0..i).map(|x| mint_item::(x as u16).0).collect::>(); Nfts::::force_collection_config( SystemOrigin::Root.into(), @@ -248,7 +249,7 @@ benchmarks_instance_pallet! { } lock_item_transfer { - let (collection, caller, caller_lookup) = create_collection::(); + let (collection, caller, _) = create_collection::(); let (item, ..) = mint_item::(0); }: _(SystemOrigin::Signed(caller.clone()), T::Helper::collection(0), T::Helper::item(0)) verify { @@ -256,7 +257,7 @@ benchmarks_instance_pallet! { } unlock_item_transfer { - let (collection, caller, caller_lookup) = create_collection::(); + let (collection, caller, _) = create_collection::(); let (item, ..) = mint_item::(0); Nfts::::lock_item_transfer( SystemOrigin::Signed(caller.clone()).into(), @@ -269,7 +270,7 @@ benchmarks_instance_pallet! { } lock_collection { - let (collection, caller, caller_lookup) = create_collection::(); + let (collection, caller, _) = create_collection::(); let lock_settings = CollectionSettings::from_disabled( CollectionSetting::TransferableItems | CollectionSetting::UnlockedMetadata | @@ -324,7 +325,7 @@ benchmarks_instance_pallet! { } force_collection_config { - let (collection, caller, caller_lookup) = create_collection::(); + let (collection, caller, _) = create_collection::(); let origin = T::ForceOrigin::successful_origin(); let call = Call::::force_collection_config { collection, @@ -336,7 +337,7 @@ benchmarks_instance_pallet! { } lock_item_properties { - let (collection, caller, caller_lookup) = create_collection::(); + let (collection, caller, _) = create_collection::(); let (item, ..) 
= mint_item::(0); let lock_metadata = true; let lock_attributes = true; diff --git a/frame/nfts/src/features/create_delete_item.rs b/frame/nfts/src/features/create_delete_item.rs index bae1d02c8ad6b..7fd745b2bfff8 100644 --- a/frame/nfts/src/features/create_delete_item.rs +++ b/frame/nfts/src/features/create_delete_item.rs @@ -22,7 +22,8 @@ impl, I: 'static> Pallet { pub fn do_mint( collection: T::CollectionId, item: T::ItemId, - owner: T::AccountId, + depositor: T::AccountId, + mint_to: T::AccountId, item_config: ItemConfig, deposit_collection_owner: bool, with_details_and_config: impl FnOnce( @@ -45,9 +46,7 @@ impl, I: 'static> Pallet { ensure!(collection_details.items < max_supply, Error::::MaxSupplyReached); } - let items = - collection_details.items.checked_add(1).ok_or(ArithmeticError::Overflow)?; - collection_details.items = items; + collection_details.items.saturating_inc(); let collection_config = Self::get_collection_config(&collection)?; let deposit_amount = match collection_config @@ -58,11 +57,11 @@ impl, I: 'static> Pallet { }; let deposit_account = match deposit_collection_owner { true => collection_details.owner.clone(), - false => owner.clone(), + false => depositor, }; - let owner = owner.clone(); - Account::::insert((&owner, &collection, &item), ()); + let item_owner = mint_to.clone(); + Account::::insert((&item_owner, &collection, &item), ()); if let Ok(existing_config) = ItemConfigOf::::try_get(&collection, &item) { ensure!(existing_config == item_config, Error::::InconsistentItemConfig); @@ -73,14 +72,17 @@ impl, I: 'static> Pallet { T::Currency::reserve(&deposit_account, deposit_amount)?; let deposit = ItemDeposit { account: deposit_account, amount: deposit_amount }; - let details = - ItemDetails { owner, approvals: ApprovalsOf::::default(), deposit }; + let details = ItemDetails { + owner: item_owner, + approvals: ApprovalsOf::::default(), + deposit, + }; Item::::insert(&collection, &item, details); Ok(()) }, )?; - 
Self::deposit_event(Event::Issued { collection, item, owner }); + Self::deposit_event(Event::Issued { collection, item, owner: mint_to }); Ok(()) } diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index 9fa696cd5c5c7..edfc29710b7da 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -158,6 +158,7 @@ impl, I: 'static> Mutate<::AccountId, ItemConfig *collection, *item, who.clone(), + who.clone(), *item_config, deposit_collection_owner, |_, _| Ok(()), diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 77353ce3359d2..8471670b58974 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -52,7 +52,7 @@ use frame_support::traits::{ use frame_system::Config as SystemConfig; use sp_runtime::{ traits::{Saturating, StaticLookup, Zero}, - ArithmeticError, RuntimeDebug, + RuntimeDebug, }; use sp_std::prelude::*; @@ -715,9 +715,12 @@ pub mod pallet { /// /// - `collection`: The collection of the item to be minted. /// - `item`: An identifier of the new item. + /// - `mint_to`: Account into which the item will be minted. /// - `witness_data`: When the mint type is `HolderOf(collection_id)`, then the owned /// item_id from that collection needs to be provided within the witness data object. /// + /// Note: the deposit will be taken from the `origin` and not the `owner` of the `item`. + /// /// Emits `Issued` event when successful. 
/// /// Weight: `O(1)` @@ -726,9 +729,11 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, + mint_to: AccountIdLookupOf, witness_data: Option>, ) -> DispatchResult { let caller = ensure_signed(origin)?; + let mint_to = T::Lookup::lookup(mint_to)?; let collection_config = Self::get_collection_config(&collection)?; let item_settings = collection_config.mint_settings.default_item_settings; @@ -738,9 +743,15 @@ pub mod pallet { collection, item, caller.clone(), + mint_to.clone(), item_config, false, |collection_details, collection_config| { + // Issuer can mint regardless of mint settings + if Self::has_role(&collection, &caller, CollectionRole::Issuer) { + return Ok(()) + } + let mint_settings = collection_config.mint_settings; let now = frame_system::Pallet::::block_number(); @@ -752,12 +763,7 @@ pub mod pallet { } match mint_settings.mint_type { - MintType::Issuer => { - ensure!( - Self::has_role(&collection, &caller, CollectionRole::Issuer), - Error::::NoPermission - ) - }, + MintType::Issuer => return Err(Error::::NoPermission.into()), MintType::HolderOf(collection_id) => { let MintWitness { owner_of_item } = witness_data.ok_or(Error::::BadWitness)?; @@ -813,7 +819,7 @@ pub mod pallet { /// /// - `collection`: The collection of the item to be minted. /// - `item`: An identifier of the new item. - /// - `owner`: An owner of the minted item. + /// - `mint_to`: Account into which the item will be minted. /// - `item_config`: A config of the new item. /// /// Emits `Issued` event when successful. 
@@ -824,13 +830,13 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - owner: AccountIdLookupOf, + mint_to: AccountIdLookupOf, item_config: ItemConfig, ) -> DispatchResult { let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; - let owner = T::Lookup::lookup(owner)?; + let mint_to = T::Lookup::lookup(mint_to)?; if let Some(check_origin) = maybe_check_origin { ensure!( @@ -838,7 +844,9 @@ pub mod pallet { Error::::NoPermission ); } - Self::do_mint(collection, item, owner, item_config, true, |_, _| Ok(())) + Self::do_mint(collection, item, mint_to.clone(), mint_to, item_config, true, |_, _| { + Ok(()) + }) } /// Destroy a single item. diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index e02e77ebe7dce..18a3fd83b4de3 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -146,12 +146,13 @@ fn basic_minting_should_work() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); assert_eq!(collections(), vec![(1, 0)]); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); assert_eq!(items(), vec![(1, 0, 42)]); assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 2, default_collection_config())); assert_eq!(collections(), vec![(1, 0), (2, 1)]); - assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(2), 1, 69, 1, default_item_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 1, 69, 1, None)); + // assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(2), 1, 69, 1, default_item_config())); assert_eq!(items(), vec![(1, 0, 42), (1, 1, 69)]); }); } @@ -176,7 +177,7 @@ fn lifecycle_should_work() { assert_eq!(Balances::reserved_balance(&1), 6); assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 69, 20, default_item_config())); 
assert_eq!(Balances::reserved_balance(&1), 7); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 70, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 70, 1, None)); assert_eq!(items(), vec![(1, 0, 70), (10, 0, 42), (20, 0, 69)]); assert_eq!(Collection::::get(0).unwrap().items, 3); assert_eq!(Collection::::get(0).unwrap().item_metadatas, 0); @@ -221,7 +222,7 @@ fn destroy_with_bad_witness_should_not_work() { )); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); assert_noop!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w), Error::::BadWitness); }); } @@ -230,7 +231,7 @@ fn destroy_with_bad_witness_should_not_work() { fn mint_should_work() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); assert_eq!(Nfts::owner(0, 42).unwrap(), 1); assert_eq!(collections(), vec![(1, 0)]); assert_eq!(items(), vec![(1, 0, 42)]); @@ -239,16 +240,24 @@ fn mint_should_work() { assert_ok!(Nfts::update_mint_settings( RuntimeOrigin::signed(1), 0, - MintSettings { start_block: Some(2), end_block: Some(3), ..Default::default() } + MintSettings { + start_block: Some(2), + end_block: Some(3), + mint_type: MintType::Public, + ..Default::default() + } )); System::set_block_number(1); assert_noop!( - Nfts::mint(RuntimeOrigin::signed(1), 0, 43, None), + Nfts::mint(RuntimeOrigin::signed(2), 0, 43, 1, None), Error::::MintNotStarted ); System::set_block_number(4); - assert_noop!(Nfts::mint(RuntimeOrigin::signed(1), 0, 43, None), Error::::MintEnded); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(2), 0, 43, 1, None), + Error::::MintEnded + ); // validate price assert_ok!(Nfts::update_mint_settings( @@ -257,7 +266,7 @@ fn mint_should_work() { 
MintSettings { mint_type: MintType::Public, price: Some(1), ..Default::default() } )); Balances::make_free_balance_be(&2, 100); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 43, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 43, 2, None)); assert_eq!(Balances::total_balance(&2), 99); // validate types @@ -267,22 +276,29 @@ fn mint_should_work() { 1, MintSettings { mint_type: MintType::HolderOf(0), ..Default::default() } )); - assert_noop!(Nfts::mint(RuntimeOrigin::signed(3), 1, 42, None), Error::::BadWitness); - assert_noop!(Nfts::mint(RuntimeOrigin::signed(2), 1, 42, None), Error::::BadWitness); assert_noop!( - Nfts::mint(RuntimeOrigin::signed(2), 1, 42, Some(MintWitness { owner_of_item: 42 })), + Nfts::mint(RuntimeOrigin::signed(3), 1, 42, 3, None), + Error::::BadWitness + ); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(2), 1, 42, 2, None), + Error::::BadWitness + ); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(2), 1, 42, 2, Some(MintWitness { owner_of_item: 42 })), Error::::BadWitness ); assert_ok!(Nfts::mint( RuntimeOrigin::signed(2), 1, 42, + 2, Some(MintWitness { owner_of_item: 43 }) )); // can't mint twice assert_noop!( - Nfts::mint(RuntimeOrigin::signed(2), 1, 46, Some(MintWitness { owner_of_item: 43 })), + Nfts::mint(RuntimeOrigin::signed(2), 1, 46, 2, Some(MintWitness { owner_of_item: 43 })), Error::::AlreadyClaimed ); }); @@ -327,7 +343,7 @@ fn transfer_should_work() { fn locking_transfer_should_work() { new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); assert_ok!(Nfts::lock_item_transfer(RuntimeOrigin::signed(1), 0, 42)); assert_noop!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::ItemLocked); @@ -355,7 +371,7 @@ fn locking_transfer_should_work() { fn origin_guards_should_work() { 
new_test_ext().execute_with(|| { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); Balances::make_free_balance_be(&2, 100); assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(2), Some(0))); @@ -376,7 +392,7 @@ fn origin_guards_should_work() { Error::::NoPermission ); assert_noop!( - Nfts::mint(RuntimeOrigin::signed(2), 0, 69, None), + Nfts::mint(RuntimeOrigin::signed(2), 0, 69, 2, None), Error::::NoPermission ); assert_noop!( @@ -421,7 +437,7 @@ fn transfer_owner_should_work() { // Mint and set metadata now and make sure that deposit gets transferred back. assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(2), 0, bvec![0u8; 20])); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); assert_eq!(Balances::reserved_balance(&1), 1); assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20])); assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(3), Some(0))); @@ -451,7 +467,7 @@ fn set_team_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); assert_ok!(Nfts::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 42, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 42, 2, None)); assert_ok!(Nfts::lock_item_transfer(RuntimeOrigin::signed(4), 0, 42)); assert_ok!(Nfts::unlock_item_transfer(RuntimeOrigin::signed(4), 0, 42)); assert_ok!(Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 3)); @@ -545,7 +561,7 @@ fn set_item_metadata_should_work() { 1, collection_config_with_all_settings_enabled() )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); // Cannot add metadata to unowned item 
assert_noop!( Nfts::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20]), @@ -609,7 +625,7 @@ fn set_collection_owner_attributes_should_work() { 1, collection_config_with_all_settings_enabled() )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, None)); assert_ok!(Nfts::set_attribute( RuntimeOrigin::signed(1), @@ -1009,8 +1025,8 @@ fn set_attribute_should_respect_lock() { 1, collection_config_with_all_settings_enabled(), )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, None)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, 1, None)); assert_ok!(Nfts::set_attribute( RuntimeOrigin::signed(1), @@ -1108,8 +1124,8 @@ fn preserve_config_for_frozen_items() { 1, collection_config_with_all_settings_enabled() )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, None)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, 1, None)); // if the item is not locked/frozen then the config gets deleted on item burn assert_ok!(Nfts::burn(RuntimeOrigin::signed(1), 0, 1, Some(1))); @@ -1144,7 +1160,7 @@ fn preserve_config_for_frozen_items() { ..Default::default() } )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, None)); }); } @@ -1158,7 +1174,7 @@ fn force_update_collection_should_work() { 1, collection_config_with_all_settings_enabled() )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 69, 2, default_item_config())); assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0; 
20])); assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20])); @@ -1171,7 +1187,7 @@ fn force_update_collection_should_work() { 0, collection_config_from_disabled_settings(CollectionSetting::DepositRequired.into()), )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 142, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 142, 1, None)); assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 169, 2, default_item_config())); assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 142, bvec![0; 20])); assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 169, bvec![0; 20])); @@ -1283,7 +1299,7 @@ fn approval_lifecycle_works() { ) )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 1, collection_id, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 1, collection_id, 1, None)); assert_noop!( Nfts::approve_transfer(RuntimeOrigin::signed(1), collection_id, 1, 2, None), @@ -1551,10 +1567,10 @@ fn max_supply_should_work() { ); // validate we can't mint more to max supply - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 0, None)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 0, user_id, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 1, user_id, None)); assert_noop!( - Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 2, None), + Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 2, user_id, None), Error::::MaxSupplyReached ); }); @@ -1568,7 +1584,13 @@ fn mint_settings_should_work() { let item_id = 0; assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_id, None)); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_id, + user_id, + None, + )); assert_eq!( ItemConfigOf::::get(collection_id, item_id) 
.unwrap() @@ -1591,7 +1613,13 @@ fn mint_settings_should_work() { ..default_collection_config() } )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_id, None)); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_id, + user_id, + None, + )); assert_eq!( ItemConfigOf::::get(collection_id, item_id) .unwrap() @@ -1613,8 +1641,20 @@ fn set_price_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_1, None)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_2, None)); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + user_id, + None, + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_2, + user_id, + None, + )); assert_ok!(Nfts::set_price( RuntimeOrigin::signed(user_id), @@ -1671,7 +1711,13 @@ fn set_price_should_work() { ) )); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_1, None)); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + user_id, + None, + )); assert_noop!( Nfts::set_price(RuntimeOrigin::signed(user_id), collection_id, item_1, Some(2), None), @@ -1700,9 +1746,9 @@ fn buy_item_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_1, None)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_2, None)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_3, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_1, user_1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_2, user_1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_3, user_1, 
None)); assert_ok!(Nfts::set_price( RuntimeOrigin::signed(user_1), @@ -1887,8 +1933,20 @@ fn create_cancel_swap_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_1, None,)); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_2, None,)); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + user_id, + None, + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_2, + user_id, + None, + )); // validate desired item and the collection exists assert_noop!( @@ -2028,7 +2086,7 @@ fn claim_swap_should_work() { assert_ok!(Nfts::mint( RuntimeOrigin::signed(user_1), collection_id, - item_1, + item_1,user_1, None, )); assert_ok!(Nfts::force_mint( @@ -2049,6 +2107,7 @@ fn claim_swap_should_work() { RuntimeOrigin::signed(user_1), collection_id, item_4, + user_1, None, )); assert_ok!(Nfts::force_mint( @@ -2300,7 +2359,13 @@ fn pallet_level_feature_flags_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, item_id, None,)); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_id, + user_id, + None, + )); // PalletFeature::Trading assert_noop!( @@ -2374,7 +2439,7 @@ fn add_remove_item_attributes_approval_should_work() { let item_id = 0; assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, default_collection_config())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_id, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_id, user_1, None)); assert_ok!(Nfts::approve_item_attributes( RuntimeOrigin::signed(user_1), collection_id, diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs 
index 187127098db16..a1b75e62e4db5 100644 --- a/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -135,8 +135,8 @@ pub trait Transfer: Inspect { fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult; } -/// Convert a `nonfungibles` trait implementation into a `nonfungible` trait implementation by identifying -/// a single item. +/// Convert a `nonfungibles` trait implementation into a `nonfungible` trait implementation by +/// identifying a single item. pub struct ItemOf< F: nonfungibles::Inspect, A: Get<>::CollectionId>, From a8c69a99f7cc71ed27d466d6d7b2ef73f35c40c7 Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Thu, 22 Dec 2022 12:56:44 +0000 Subject: [PATCH 063/101] ".git/.scripts/bench-bot.sh" pallet dev pallet_nfts --- frame/nfts/src/weights.rs | 384 +++++++++++++++++++------------------- 1 file changed, 193 insertions(+), 191 deletions(-) diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index 2965f335c229e..f05f8ca514c3e 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_nfts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-25, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-12-22, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -96,8 +96,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts CollectionConfigOf (r:0 w:1) // Storage: Nfts CollectionAccount (r:0 w:1) fn create() -> Weight { - // Minimum execution time: 42_075 nanoseconds. - Weight::from_ref_time(42_614_000) + // Minimum execution time: 44_312 nanoseconds. 
+ Weight::from_ref_time(44_871_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -107,37 +107,38 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts CollectionConfigOf (r:0 w:1) // Storage: Nfts CollectionAccount (r:0 w:1) fn force_create() -> Weight { - // Minimum execution time: 29_799 nanoseconds. - Weight::from_ref_time(30_511_000) + // Minimum execution time: 31_654 nanoseconds. + Weight::from_ref_time(32_078_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(5)) } // Storage: Nfts Collection (r:1 w:1) - // Storage: Nfts Item (r:1 w:0) - // Storage: Nfts Attribute (r:1 w:0) + // Storage: Nfts Item (r:1001 w:1000) + // Storage: Nfts Attribute (r:1001 w:1000) + // Storage: Nfts ItemMetadataOf (r:0 w:1000) // Storage: Nfts CollectionRoleOf (r:0 w:1) // Storage: Nfts CollectionMetadataOf (r:0 w:1) // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts ItemConfigOf (r:0 w:1000) + // Storage: Nfts Account (r:0 w:1000) // Storage: Nfts CollectionAccount (r:0 w:1) - // Storage: Nfts ItemMetadataOf (r:0 w:20) - // Storage: Nfts ItemConfigOf (r:0 w:20) - // Storage: Nfts Account (r:0 w:20) /// The range of component `n` is `[0, 1000]`. /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - // Minimum execution time: 65_846 nanoseconds. 
- Weight::from_ref_time(66_082_000) - // Standard Error: 27_878 - .saturating_add(Weight::from_ref_time(26_747_590).saturating_mul(n.into())) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(315_839 as u64).saturating_mul(m.into())) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(217_497 as u64).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(5)) - .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(n.into()))) + // Minimum execution time: 19_183_393 nanoseconds. + Weight::from_ref_time(17_061_526_855) + // Standard Error: 16_689 + .saturating_add(Weight::from_ref_time(353_523).saturating_mul(n.into())) + // Standard Error: 16_689 + .saturating_add(Weight::from_ref_time(1_861_080).saturating_mul(m.into())) + // Standard Error: 16_689 + .saturating_add(Weight::from_ref_time(8_858_987).saturating_mul(a.into())) + .saturating_add(T::DbWeight::get().reads(1003)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(T::DbWeight::get().writes(3005)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(m.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) } // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts Item (r:1 w:1) @@ -146,8 +147,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts ItemConfigOf (r:1 w:1) // Storage: Nfts Account (r:0 w:1) fn mint() -> Weight { - // Minimum execution time: 58_577 nanoseconds. - Weight::from_ref_time(59_058_000) + // Minimum execution time: 57_753 nanoseconds. 
+ Weight::from_ref_time(58_313_000) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -158,8 +159,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts ItemConfigOf (r:1 w:1) // Storage: Nfts Account (r:0 w:1) fn force_mint() -> Weight { - // Minimum execution time: 56_494 nanoseconds. - Weight::from_ref_time(57_565_000) + // Minimum execution time: 56_429 nanoseconds. + Weight::from_ref_time(57_202_000) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -172,8 +173,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts ItemAttributesApprovalsOf (r:0 w:1) // Storage: Nfts PendingSwapOf (r:0 w:1) fn burn() -> Weight { - // Minimum execution time: 59_393 nanoseconds. - Weight::from_ref_time(60_562_000) + // Minimum execution time: 59_681 nanoseconds. + Weight::from_ref_time(60_058_000) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -187,8 +188,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts ItemPriceOf (r:0 w:1) // Storage: Nfts PendingSwapOf (r:0 w:1) fn transfer() -> Weight { - // Minimum execution time: 65_852 nanoseconds. - Weight::from_ref_time(66_308_000) + // Minimum execution time: 66_085 nanoseconds. + Weight::from_ref_time(67_065_000) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(6)) } @@ -197,10 +198,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Item (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - // Minimum execution time: 25_795 nanoseconds. - Weight::from_ref_time(26_128_000) - // Standard Error: 10_295 - .saturating_add(Weight::from_ref_time(11_202_286).saturating_mul(i.into())) + // Minimum execution time: 25_949 nanoseconds. 
+ Weight::from_ref_time(26_106_000) + // Standard Error: 10_326 + .saturating_add(Weight::from_ref_time(11_496_776).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) @@ -208,24 +209,24 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts CollectionRoleOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:1) fn lock_item_transfer() -> Weight { - // Minimum execution time: 29_090 nanoseconds. - Weight::from_ref_time(29_772_000) + // Minimum execution time: 30_080 nanoseconds. + Weight::from_ref_time(30_825_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Nfts CollectionRoleOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:1) fn unlock_item_transfer() -> Weight { - // Minimum execution time: 28_947 nanoseconds. - Weight::from_ref_time(29_559_000) + // Minimum execution time: 30_612 nanoseconds. + Weight::from_ref_time(31_422_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Nfts CollectionRoleOf (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:1) fn lock_collection() -> Weight { - // Minimum execution time: 26_972 nanoseconds. - Weight::from_ref_time(27_803_000) + // Minimum execution time: 27_470 nanoseconds. + Weight::from_ref_time(28_015_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -233,40 +234,40 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts CollectionAccount (r:0 w:2) fn transfer_ownership() -> Weight { - // Minimum execution time: 32_165 nanoseconds. - Weight::from_ref_time(32_926_000) + // Minimum execution time: 33_750 nanoseconds. 
+ Weight::from_ref_time(34_139_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts CollectionRoleOf (r:0 w:4) fn set_team() -> Weight { - // Minimum execution time: 35_375 nanoseconds. - Weight::from_ref_time(35_950_000) + // Minimum execution time: 36_565 nanoseconds. + Weight::from_ref_time(37_464_000) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(5)) } // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts CollectionAccount (r:0 w:2) fn force_collection_owner() -> Weight { - // Minimum execution time: 27_470 nanoseconds. - Weight::from_ref_time(27_855_000) + // Minimum execution time: 29_028 nanoseconds. + Weight::from_ref_time(29_479_000) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) } // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:0 w:1) fn force_collection_config() -> Weight { - // Minimum execution time: 23_990 nanoseconds. - Weight::from_ref_time(24_347_000) + // Minimum execution time: 24_695 nanoseconds. + Weight::from_ref_time(25_304_000) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:1) fn lock_item_properties() -> Weight { - // Minimum execution time: 28_481 nanoseconds. - Weight::from_ref_time(28_929_000) + // Minimum execution time: 28_910 nanoseconds. + Weight::from_ref_time(29_186_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -275,16 +276,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn set_attribute() -> Weight { - // Minimum execution time: 54_435 nanoseconds. - Weight::from_ref_time(55_237_000) + // Minimum execution time: 56_407 nanoseconds. 
+ Weight::from_ref_time(58_176_000) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts Attribute (r:1 w:1) fn force_set_attribute() -> Weight { - // Minimum execution time: 35_254 nanoseconds. - Weight::from_ref_time(35_941_000) + // Minimum execution time: 36_402 nanoseconds. + Weight::from_ref_time(37_034_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -292,16 +293,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts ItemConfigOf (r:1 w:0) fn clear_attribute() -> Weight { - // Minimum execution time: 51_483 nanoseconds. - Weight::from_ref_time(52_915_000) + // Minimum execution time: 52_022 nanoseconds. + Weight::from_ref_time(54_059_000) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } // Storage: Nfts Item (r:1 w:0) // Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) fn approve_item_attributes() -> Weight { - // Minimum execution time: 27_929 nanoseconds. - Weight::from_ref_time(28_329_000) + // Minimum execution time: 28_475 nanoseconds. + Weight::from_ref_time(29_162_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -311,10 +312,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) /// The range of component `n` is `[0, 1000]`. fn cancel_item_attributes_approval(n: u32, ) -> Weight { - // Minimum execution time: 37_217 nanoseconds. - Weight::from_ref_time(37_692_000) - // Standard Error: 7_804 - .saturating_add(Weight::from_ref_time(7_344_173).saturating_mul(n.into())) + // Minimum execution time: 37_529 nanoseconds. 
+ Weight::from_ref_time(38_023_000) + // Standard Error: 8_136 + .saturating_add(Weight::from_ref_time(7_452_872).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,8 +326,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - // Minimum execution time: 47_675 nanoseconds. - Weight::from_ref_time(48_282_000) + // Minimum execution time: 49_300 nanoseconds. + Weight::from_ref_time(49_790_000) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -334,8 +335,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts ItemMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - // Minimum execution time: 46_062 nanoseconds. - Weight::from_ref_time(46_854_000) + // Minimum execution time: 47_248 nanoseconds. + Weight::from_ref_time(48_094_000) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -343,8 +344,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts CollectionMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - // Minimum execution time: 43_847 nanoseconds. - Weight::from_ref_time(44_792_000) + // Minimum execution time: 44_137 nanoseconds. + Weight::from_ref_time(44_905_000) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -352,8 +353,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts CollectionMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - // Minimum execution time: 42_403 nanoseconds. - Weight::from_ref_time(42_811_000) + // Minimum execution time: 43_005 nanoseconds. 
+ Weight::from_ref_time(43_898_000) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -361,47 +362,47 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts CollectionRoleOf (r:1 w:0) fn approve_transfer() -> Weight { - // Minimum execution time: 34_880 nanoseconds. - Weight::from_ref_time(35_737_000) + // Minimum execution time: 36_344 nanoseconds. + Weight::from_ref_time(36_954_000) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Nfts Item (r:1 w:1) // Storage: Nfts CollectionRoleOf (r:1 w:0) fn cancel_approval() -> Weight { - // Minimum execution time: 31_606 nanoseconds. - Weight::from_ref_time(32_339_000) + // Minimum execution time: 32_418 nanoseconds. + Weight::from_ref_time(33_029_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Nfts Item (r:1 w:1) // Storage: Nfts CollectionRoleOf (r:1 w:0) fn clear_all_transfer_approvals() -> Weight { - // Minimum execution time: 30_626 nanoseconds. - Weight::from_ref_time(31_043_000) + // Minimum execution time: 31_448 nanoseconds. + Weight::from_ref_time(31_979_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - // Minimum execution time: 27_276 nanoseconds. - Weight::from_ref_time(28_016_000) + // Minimum execution time: 27_487 nanoseconds. + Weight::from_ref_time(28_080_000) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Nfts CollectionConfigOf (r:1 w:1) // Storage: Nfts Collection (r:1 w:0) fn set_collection_max_supply() -> Weight { - // Minimum execution time: 28_366 nanoseconds. - Weight::from_ref_time(28_719_000) + // Minimum execution time: 28_235 nanoseconds. 
+ Weight::from_ref_time(28_967_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:1) fn update_mint_settings() -> Weight { - // Minimum execution time: 27_292 nanoseconds. - Weight::from_ref_time(27_614_000) + // Minimum execution time: 28_172 nanoseconds. + Weight::from_ref_time(28_636_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -410,8 +411,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - // Minimum execution time: 34_133 nanoseconds. - Weight::from_ref_time(34_510_000) + // Minimum execution time: 35_336 nanoseconds. + Weight::from_ref_time(36_026_000) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -424,31 +425,31 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Account (r:0 w:2) // Storage: Nfts PendingSwapOf (r:0 w:1) fn buy_item() -> Weight { - // Minimum execution time: 69_501 nanoseconds. - Weight::from_ref_time(70_342_000) + // Minimum execution time: 70_971 nanoseconds. + Weight::from_ref_time(72_036_000) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(6)) } /// The range of component `n` is `[0, 10]`. fn pay_tips(n: u32, ) -> Weight { - // Minimum execution time: 4_754 nanoseconds. - Weight::from_ref_time(11_356_736) - // Standard Error: 38_352 - .saturating_add(Weight::from_ref_time(3_427_961).saturating_mul(n.into())) + // Minimum execution time: 5_151 nanoseconds. + Weight::from_ref_time(11_822_888) + // Standard Error: 38_439 + .saturating_add(Weight::from_ref_time(3_511_844).saturating_mul(n.into())) } // Storage: Nfts Item (r:2 w:0) // Storage: Nfts PendingSwapOf (r:0 w:1) fn create_swap() -> Weight { - // Minimum execution time: 31_371 nanoseconds. 
- Weight::from_ref_time(32_227_000) + // Minimum execution time: 33_027 nanoseconds. + Weight::from_ref_time(33_628_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Nfts PendingSwapOf (r:1 w:1) // Storage: Nfts Item (r:1 w:0) fn cancel_swap() -> Weight { - // Minimum execution time: 34_114 nanoseconds. - Weight::from_ref_time(34_779_000) + // Minimum execution time: 35_890 nanoseconds. + Weight::from_ref_time(36_508_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -461,8 +462,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Nfts Account (r:0 w:4) // Storage: Nfts ItemPriceOf (r:0 w:2) fn claim_swap() -> Weight { - // Minimum execution time: 97_965 nanoseconds. - Weight::from_ref_time(98_699_000) + // Minimum execution time: 101_076 nanoseconds. + Weight::from_ref_time(101_863_000) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(11)) } @@ -476,8 +477,8 @@ impl WeightInfo for () { // Storage: Nfts CollectionConfigOf (r:0 w:1) // Storage: Nfts CollectionAccount (r:0 w:1) fn create() -> Weight { - // Minimum execution time: 42_075 nanoseconds. - Weight::from_ref_time(42_614_000) + // Minimum execution time: 44_312 nanoseconds. + Weight::from_ref_time(44_871_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(5)) } @@ -487,37 +488,38 @@ impl WeightInfo for () { // Storage: Nfts CollectionConfigOf (r:0 w:1) // Storage: Nfts CollectionAccount (r:0 w:1) fn force_create() -> Weight { - // Minimum execution time: 29_799 nanoseconds. - Weight::from_ref_time(30_511_000) + // Minimum execution time: 31_654 nanoseconds. 
+ Weight::from_ref_time(32_078_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(5)) } // Storage: Nfts Collection (r:1 w:1) - // Storage: Nfts Item (r:1 w:0) - // Storage: Nfts Attribute (r:1 w:0) + // Storage: Nfts Item (r:1001 w:1000) + // Storage: Nfts Attribute (r:1001 w:1000) + // Storage: Nfts ItemMetadataOf (r:0 w:1000) // Storage: Nfts CollectionRoleOf (r:0 w:1) // Storage: Nfts CollectionMetadataOf (r:0 w:1) // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts ItemConfigOf (r:0 w:1000) + // Storage: Nfts Account (r:0 w:1000) // Storage: Nfts CollectionAccount (r:0 w:1) - // Storage: Nfts ItemMetadataOf (r:0 w:20) - // Storage: Nfts ItemConfigOf (r:0 w:20) - // Storage: Nfts Account (r:0 w:20) /// The range of component `n` is `[0, 1000]`. /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - // Minimum execution time: 65_846 nanoseconds. - Weight::from_ref_time(66_082_000) - // Standard Error: 27_878 - .saturating_add(Weight::from_ref_time(26_747_590).saturating_mul(n.into())) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(315_839 as u64).saturating_mul(m.into())) - // Standard Error: 27_329 - .saturating_add(Weight::from_ref_time(217_497 as u64).saturating_mul(a.into())) - .saturating_add(RocksDbWeight::get().reads(3)) - .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(5)) - .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(n.into()))) + // Minimum execution time: 19_183_393 nanoseconds. 
+ Weight::from_ref_time(17_061_526_855) + // Standard Error: 16_689 + .saturating_add(Weight::from_ref_time(353_523).saturating_mul(n.into())) + // Standard Error: 16_689 + .saturating_add(Weight::from_ref_time(1_861_080).saturating_mul(m.into())) + // Standard Error: 16_689 + .saturating_add(Weight::from_ref_time(8_858_987).saturating_mul(a.into())) + .saturating_add(RocksDbWeight::get().reads(1003)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(RocksDbWeight::get().writes(3005)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(m.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(a.into()))) } // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts Item (r:1 w:1) @@ -526,8 +528,8 @@ impl WeightInfo for () { // Storage: Nfts ItemConfigOf (r:1 w:1) // Storage: Nfts Account (r:0 w:1) fn mint() -> Weight { - // Minimum execution time: 58_577 nanoseconds. - Weight::from_ref_time(59_058_000) + // Minimum execution time: 57_753 nanoseconds. + Weight::from_ref_time(58_313_000) .saturating_add(RocksDbWeight::get().reads(5)) .saturating_add(RocksDbWeight::get().writes(4)) } @@ -538,8 +540,8 @@ impl WeightInfo for () { // Storage: Nfts ItemConfigOf (r:1 w:1) // Storage: Nfts Account (r:0 w:1) fn force_mint() -> Weight { - // Minimum execution time: 56_494 nanoseconds. - Weight::from_ref_time(57_565_000) + // Minimum execution time: 56_429 nanoseconds. + Weight::from_ref_time(57_202_000) .saturating_add(RocksDbWeight::get().reads(5)) .saturating_add(RocksDbWeight::get().writes(4)) } @@ -552,8 +554,8 @@ impl WeightInfo for () { // Storage: Nfts ItemAttributesApprovalsOf (r:0 w:1) // Storage: Nfts PendingSwapOf (r:0 w:1) fn burn() -> Weight { - // Minimum execution time: 59_393 nanoseconds. - Weight::from_ref_time(60_562_000) + // Minimum execution time: 59_681 nanoseconds. 
+ Weight::from_ref_time(60_058_000) .saturating_add(RocksDbWeight::get().reads(4)) .saturating_add(RocksDbWeight::get().writes(7)) } @@ -567,8 +569,8 @@ impl WeightInfo for () { // Storage: Nfts ItemPriceOf (r:0 w:1) // Storage: Nfts PendingSwapOf (r:0 w:1) fn transfer() -> Weight { - // Minimum execution time: 65_852 nanoseconds. - Weight::from_ref_time(66_308_000) + // Minimum execution time: 66_085 nanoseconds. + Weight::from_ref_time(67_065_000) .saturating_add(RocksDbWeight::get().reads(6)) .saturating_add(RocksDbWeight::get().writes(6)) } @@ -577,10 +579,10 @@ impl WeightInfo for () { // Storage: Nfts Item (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - // Minimum execution time: 25_795 nanoseconds. - Weight::from_ref_time(26_128_000) - // Standard Error: 10_295 - .saturating_add(Weight::from_ref_time(11_202_286).saturating_mul(i.into())) + // Minimum execution time: 25_949 nanoseconds. + Weight::from_ref_time(26_106_000) + // Standard Error: 10_326 + .saturating_add(Weight::from_ref_time(11_496_776).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) @@ -588,24 +590,24 @@ impl WeightInfo for () { // Storage: Nfts CollectionRoleOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:1) fn lock_item_transfer() -> Weight { - // Minimum execution time: 29_090 nanoseconds. - Weight::from_ref_time(29_772_000) + // Minimum execution time: 30_080 nanoseconds. + Weight::from_ref_time(30_825_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Nfts CollectionRoleOf (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:1) fn unlock_item_transfer() -> Weight { - // Minimum execution time: 28_947 nanoseconds. - Weight::from_ref_time(29_559_000) + // Minimum execution time: 30_612 nanoseconds. 
+ Weight::from_ref_time(31_422_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Nfts CollectionRoleOf (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:1) fn lock_collection() -> Weight { - // Minimum execution time: 26_972 nanoseconds. - Weight::from_ref_time(27_803_000) + // Minimum execution time: 27_470 nanoseconds. + Weight::from_ref_time(28_015_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(1)) } @@ -613,40 +615,40 @@ impl WeightInfo for () { // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts CollectionAccount (r:0 w:2) fn transfer_ownership() -> Weight { - // Minimum execution time: 32_165 nanoseconds. - Weight::from_ref_time(32_926_000) + // Minimum execution time: 33_750 nanoseconds. + Weight::from_ref_time(34_139_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(4)) } // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts CollectionRoleOf (r:0 w:4) fn set_team() -> Weight { - // Minimum execution time: 35_375 nanoseconds. - Weight::from_ref_time(35_950_000) + // Minimum execution time: 36_565 nanoseconds. + Weight::from_ref_time(37_464_000) .saturating_add(RocksDbWeight::get().reads(1)) .saturating_add(RocksDbWeight::get().writes(5)) } // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts CollectionAccount (r:0 w:2) fn force_collection_owner() -> Weight { - // Minimum execution time: 27_470 nanoseconds. - Weight::from_ref_time(27_855_000) + // Minimum execution time: 29_028 nanoseconds. + Weight::from_ref_time(29_479_000) .saturating_add(RocksDbWeight::get().reads(1)) .saturating_add(RocksDbWeight::get().writes(3)) } // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:0 w:1) fn force_collection_config() -> Weight { - // Minimum execution time: 23_990 nanoseconds. - Weight::from_ref_time(24_347_000) + // Minimum execution time: 24_695 nanoseconds. 
+ Weight::from_ref_time(25_304_000) .saturating_add(RocksDbWeight::get().reads(1)) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts ItemConfigOf (r:1 w:1) fn lock_item_properties() -> Weight { - // Minimum execution time: 28_481 nanoseconds. - Weight::from_ref_time(28_929_000) + // Minimum execution time: 28_910 nanoseconds. + Weight::from_ref_time(29_186_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(1)) } @@ -655,16 +657,16 @@ impl WeightInfo for () { // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts Attribute (r:1 w:1) fn set_attribute() -> Weight { - // Minimum execution time: 54_435 nanoseconds. - Weight::from_ref_time(55_237_000) + // Minimum execution time: 56_407 nanoseconds. + Weight::from_ref_time(58_176_000) .saturating_add(RocksDbWeight::get().reads(4)) .saturating_add(RocksDbWeight::get().writes(2)) } // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts Attribute (r:1 w:1) fn force_set_attribute() -> Weight { - // Minimum execution time: 35_254 nanoseconds. - Weight::from_ref_time(35_941_000) + // Minimum execution time: 36_402 nanoseconds. + Weight::from_ref_time(37_034_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(2)) } @@ -672,16 +674,16 @@ impl WeightInfo for () { // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts ItemConfigOf (r:1 w:0) fn clear_attribute() -> Weight { - // Minimum execution time: 51_483 nanoseconds. - Weight::from_ref_time(52_915_000) + // Minimum execution time: 52_022 nanoseconds. + Weight::from_ref_time(54_059_000) .saturating_add(RocksDbWeight::get().reads(3)) .saturating_add(RocksDbWeight::get().writes(2)) } // Storage: Nfts Item (r:1 w:0) // Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) fn approve_item_attributes() -> Weight { - // Minimum execution time: 27_929 nanoseconds. 
- Weight::from_ref_time(28_329_000) + // Minimum execution time: 28_475 nanoseconds. + Weight::from_ref_time(29_162_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(1)) } @@ -691,10 +693,10 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) /// The range of component `n` is `[0, 1000]`. fn cancel_item_attributes_approval(n: u32, ) -> Weight { - // Minimum execution time: 37_217 nanoseconds. - Weight::from_ref_time(37_692_000) - // Standard Error: 7_804 - .saturating_add(Weight::from_ref_time(7_344_173).saturating_mul(n.into())) + // Minimum execution time: 37_529 nanoseconds. + Weight::from_ref_time(38_023_000) + // Standard Error: 8_136 + .saturating_add(Weight::from_ref_time(7_452_872).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2)) @@ -705,8 +707,8 @@ impl WeightInfo for () { // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts ItemMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - // Minimum execution time: 47_675 nanoseconds. - Weight::from_ref_time(48_282_000) + // Minimum execution time: 49_300 nanoseconds. + Weight::from_ref_time(49_790_000) .saturating_add(RocksDbWeight::get().reads(4)) .saturating_add(RocksDbWeight::get().writes(2)) } @@ -714,8 +716,8 @@ impl WeightInfo for () { // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts ItemMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - // Minimum execution time: 46_062 nanoseconds. - Weight::from_ref_time(46_854_000) + // Minimum execution time: 47_248 nanoseconds. 
+ Weight::from_ref_time(48_094_000) .saturating_add(RocksDbWeight::get().reads(3)) .saturating_add(RocksDbWeight::get().writes(2)) } @@ -723,8 +725,8 @@ impl WeightInfo for () { // Storage: Nfts Collection (r:1 w:1) // Storage: Nfts CollectionMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - // Minimum execution time: 43_847 nanoseconds. - Weight::from_ref_time(44_792_000) + // Minimum execution time: 44_137 nanoseconds. + Weight::from_ref_time(44_905_000) .saturating_add(RocksDbWeight::get().reads(3)) .saturating_add(RocksDbWeight::get().writes(2)) } @@ -732,8 +734,8 @@ impl WeightInfo for () { // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts CollectionMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - // Minimum execution time: 42_403 nanoseconds. - Weight::from_ref_time(42_811_000) + // Minimum execution time: 43_005 nanoseconds. + Weight::from_ref_time(43_898_000) .saturating_add(RocksDbWeight::get().reads(3)) .saturating_add(RocksDbWeight::get().writes(1)) } @@ -741,47 +743,47 @@ impl WeightInfo for () { // Storage: Nfts CollectionConfigOf (r:1 w:0) // Storage: Nfts CollectionRoleOf (r:1 w:0) fn approve_transfer() -> Weight { - // Minimum execution time: 34_880 nanoseconds. - Weight::from_ref_time(35_737_000) + // Minimum execution time: 36_344 nanoseconds. + Weight::from_ref_time(36_954_000) .saturating_add(RocksDbWeight::get().reads(3)) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Nfts Item (r:1 w:1) // Storage: Nfts CollectionRoleOf (r:1 w:0) fn cancel_approval() -> Weight { - // Minimum execution time: 31_606 nanoseconds. - Weight::from_ref_time(32_339_000) + // Minimum execution time: 32_418 nanoseconds. 
+ Weight::from_ref_time(33_029_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Nfts Item (r:1 w:1) // Storage: Nfts CollectionRoleOf (r:1 w:0) fn clear_all_transfer_approvals() -> Weight { - // Minimum execution time: 30_626 nanoseconds. - Weight::from_ref_time(31_043_000) + // Minimum execution time: 31_448 nanoseconds. + Weight::from_ref_time(31_979_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Nfts OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - // Minimum execution time: 27_276 nanoseconds. - Weight::from_ref_time(28_016_000) + // Minimum execution time: 27_487 nanoseconds. + Weight::from_ref_time(28_080_000) .saturating_add(RocksDbWeight::get().reads(1)) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Nfts CollectionConfigOf (r:1 w:1) // Storage: Nfts Collection (r:1 w:0) fn set_collection_max_supply() -> Weight { - // Minimum execution time: 28_366 nanoseconds. - Weight::from_ref_time(28_719_000) + // Minimum execution time: 28_235 nanoseconds. + Weight::from_ref_time(28_967_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Nfts Collection (r:1 w:0) // Storage: Nfts CollectionConfigOf (r:1 w:1) fn update_mint_settings() -> Weight { - // Minimum execution time: 27_292 nanoseconds. - Weight::from_ref_time(27_614_000) + // Minimum execution time: 28_172 nanoseconds. + Weight::from_ref_time(28_636_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(1)) } @@ -790,8 +792,8 @@ impl WeightInfo for () { // Storage: Nfts ItemConfigOf (r:1 w:0) // Storage: Nfts ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - // Minimum execution time: 34_133 nanoseconds. - Weight::from_ref_time(34_510_000) + // Minimum execution time: 35_336 nanoseconds. 
+ Weight::from_ref_time(36_026_000) .saturating_add(RocksDbWeight::get().reads(3)) .saturating_add(RocksDbWeight::get().writes(1)) } @@ -804,31 +806,31 @@ impl WeightInfo for () { // Storage: Nfts Account (r:0 w:2) // Storage: Nfts PendingSwapOf (r:0 w:1) fn buy_item() -> Weight { - // Minimum execution time: 69_501 nanoseconds. - Weight::from_ref_time(70_342_000) + // Minimum execution time: 70_971 nanoseconds. + Weight::from_ref_time(72_036_000) .saturating_add(RocksDbWeight::get().reads(6)) .saturating_add(RocksDbWeight::get().writes(6)) } /// The range of component `n` is `[0, 10]`. fn pay_tips(n: u32, ) -> Weight { - // Minimum execution time: 4_754 nanoseconds. - Weight::from_ref_time(11_356_736) - // Standard Error: 38_352 - .saturating_add(Weight::from_ref_time(3_427_961).saturating_mul(n.into())) + // Minimum execution time: 5_151 nanoseconds. + Weight::from_ref_time(11_822_888) + // Standard Error: 38_439 + .saturating_add(Weight::from_ref_time(3_511_844).saturating_mul(n.into())) } // Storage: Nfts Item (r:2 w:0) // Storage: Nfts PendingSwapOf (r:0 w:1) fn create_swap() -> Weight { - // Minimum execution time: 31_371 nanoseconds. - Weight::from_ref_time(32_227_000) + // Minimum execution time: 33_027 nanoseconds. + Weight::from_ref_time(33_628_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Nfts PendingSwapOf (r:1 w:1) // Storage: Nfts Item (r:1 w:0) fn cancel_swap() -> Weight { - // Minimum execution time: 34_114 nanoseconds. - Weight::from_ref_time(34_779_000) + // Minimum execution time: 35_890 nanoseconds. + Weight::from_ref_time(36_508_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(1)) } @@ -841,8 +843,8 @@ impl WeightInfo for () { // Storage: Nfts Account (r:0 w:4) // Storage: Nfts ItemPriceOf (r:0 w:2) fn claim_swap() -> Weight { - // Minimum execution time: 97_965 nanoseconds. 
- Weight::from_ref_time(98_699_000) + // Minimum execution time: 101_076 nanoseconds. + Weight::from_ref_time(101_863_000) .saturating_add(RocksDbWeight::get().reads(8)) .saturating_add(RocksDbWeight::get().writes(11)) } From 72501e442aab5e1078a66cd27fbc407ac6a0275a Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Thu, 22 Dec 2022 15:44:16 +0200 Subject: [PATCH 064/101] Fmt --- frame/nft-fractionalisation/src/lib.rs | 6 +++--- primitives/keystore/src/testing.rs | 7 ++----- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/frame/nft-fractionalisation/src/lib.rs b/frame/nft-fractionalisation/src/lib.rs index 2589d12410eba..dd0aecb1fccf6 100644 --- a/frame/nft-fractionalisation/src/lib.rs +++ b/frame/nft-fractionalisation/src/lib.rs @@ -8,8 +8,8 @@ mod mock; #[cfg(test)] mod tests; -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; +// #[cfg(feature = "runtime-benchmarks")] +// mod benchmarking; pub use scale_info::Type; @@ -133,7 +133,7 @@ pub mod pallet { #[pallet::error] pub enum Error { AssetDataNotFound, - NFTDataNotFound + NFTDataNotFound, } #[pallet::call] diff --git a/primitives/keystore/src/testing.rs b/primitives/keystore/src/testing.rs index c111fffb7246c..a9ec6709d912a 100644 --- a/primitives/keystore/src/testing.rs +++ b/primitives/keystore/src/testing.rs @@ -368,11 +368,8 @@ impl SyncCryptoStore for KeyStore { transcript_data: VRFTranscriptData, ) -> Result, Error> { let transcript = make_transcript(transcript_data); - let pair = if let Some(k) = self.sr25519_key_pair(key_type, public) { - k - } else { - return Ok(None) - }; + let pair = + if let Some(k) = self.sr25519_key_pair(key_type, public) { k } else { return Ok(None) }; let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); Ok(Some(VRFSignature { output: inout.to_output(), proof })) From 7df1e2cef75604ac0cb57d23ba465a9bf994672a Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Fri, 23 Dec 2022 13:08:28 +0200 Subject: 
[PATCH 065/101] Update frame/nfts/src/common_functions.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/common_functions.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/common_functions.rs b/frame/nfts/src/common_functions.rs index b3cac7f69ec0e..9c0faeb6b7c77 100644 --- a/frame/nfts/src/common_functions.rs +++ b/frame/nfts/src/common_functions.rs @@ -25,7 +25,7 @@ impl, I: 'static> Pallet { Item::::get(collection, item).map(|i| i.owner) } - /// Get the owner of the item, if the item exists. + /// Get the owner of the collection, if the collection exists. pub fn collection_owner(collection: T::CollectionId) -> Option { Collection::::get(collection).map(|i| i.owner) } From 2323614a3570ab102f687bfbb920cd3bcf8797b5 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Fri, 23 Dec 2022 13:08:37 +0200 Subject: [PATCH 066/101] Update frame/nfts/src/types.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index 4cb92d692d7f7..be9951e635028 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -66,7 +66,7 @@ pub trait Incrementable { } impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); -/// Information about the collection. +/// Information about a collection. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct CollectionDetails { /// Collection's owner. 
From 08f0ea22a9da3eafb3dea5b9fb0be529ad044ea5 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Fri, 23 Dec 2022 13:08:49 +0200 Subject: [PATCH 067/101] Update frame/nfts/src/types.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index be9951e635028..e50be12e394c9 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -71,7 +71,7 @@ impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); pub struct CollectionDetails { /// Collection's owner. pub(super) owner: AccountId, - /// The total balance deposited by the owner for the all storage data associated with this + /// The total balance deposited by the owner for all the storage data associated with this /// collection. Used by `destroy`. pub(super) owner_deposit: DepositBalance, /// The total number of outstanding items of this collection. From 66cbec6d856203a1d22c1ff09f4424d9717357b1 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Fri, 23 Dec 2022 13:09:14 +0200 Subject: [PATCH 068/101] Update frame/nfts/src/types.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/types.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index e50be12e394c9..662c64594f228 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -167,9 +167,9 @@ pub struct ItemMetadata> { /// Information about the tip. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ItemTip { - /// A collection of the item. + /// The collection of the item. pub(super) collection: CollectionId, - /// An item of which the tip is send for. + /// An item of which the tip is sent for. 
pub(super) item: ItemId, /// A sender of the tip. pub(super) receiver: AccountId, From 3218b20a1903cc6372dc76ea30f9e9269d84bd74 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Fri, 23 Dec 2022 13:09:47 +0200 Subject: [PATCH 069/101] Update frame/nfts/src/types.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/types.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index 662c64594f228..4bd80c18c8547 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -180,9 +180,9 @@ pub struct ItemTip { /// Information about the pending swap. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] pub struct PendingSwap { - /// A collection of the item user wants to receive. + /// The collection that contains the item that the user wants to receive. pub(super) desired_collection: CollectionId, - /// An item user wants to receive. + /// The item the user wants to receive. pub(super) desired_item: Option, /// A price for the desired `item` with the direction. pub(super) price: Option, From 40ecc6011775d13b25a0e46b689b75ec47bf2fe5 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Fri, 23 Dec 2022 13:10:45 +0200 Subject: [PATCH 070/101] Update frame/nfts/src/types.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nfts/src/types.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index 4bd80c18c8547..58b1acaaedf42 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -199,10 +199,10 @@ pub struct AttributeDeposit { pub(super) amount: DepositBalance, } -/// Specifies whether the tokens will be send or received. +/// Specifies whether the tokens will be sent or received. 
#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub enum PriceDirection { - /// Tokens will be send. + /// Tokens will be sent. Send, /// Tokens will be received. Receive, From 30c0f23ff431ac1b8afe045370725a1fc1316e1f Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Fri, 23 Dec 2022 15:34:26 +0200 Subject: [PATCH 071/101] Add call indexes --- frame/nfts/src/lib.rs | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 8471670b58974..2006d78959c4d 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -606,6 +606,7 @@ pub mod pallet { /// Emits `Created` event when successful. /// /// Weight: `O(1)` + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::create())] pub fn create( origin: OriginFor, @@ -649,6 +650,7 @@ pub mod pallet { /// Emits `ForceCreated` event when successful. /// /// Weight: `O(1)` + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::force_create())] pub fn force_create( origin: OriginFor, @@ -686,6 +688,7 @@ pub mod pallet { /// - `n = witness.items` /// - `m = witness.item_metadatas` /// - `a = witness.attributes` + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::destroy( witness.items, witness.item_metadatas, @@ -724,6 +727,7 @@ pub mod pallet { /// Emits `Issued` event when successful. /// /// Weight: `O(1)` + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::mint())] pub fn mint( origin: OriginFor, @@ -825,6 +829,7 @@ pub mod pallet { /// Emits `Issued` event when successful. /// /// Weight: `O(1)` + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::force_mint())] pub fn force_mint( origin: OriginFor, @@ -862,6 +867,7 @@ pub mod pallet { /// /// Weight: `O(1)` /// Modes: `check_owner.is_some()`. + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::burn())] pub fn burn( origin: OriginFor, @@ -899,6 +905,7 @@ pub mod pallet { /// Emits `Transferred`. 
/// /// Weight: `O(1)` + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::transfer())] pub fn transfer( origin: OriginFor, @@ -940,6 +947,7 @@ pub mod pallet { /// is not permitted to call it. /// /// Weight: `O(items.len())` + #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::redeposit(items.len() as u32))] pub fn redeposit( origin: OriginFor, @@ -999,6 +1007,7 @@ pub mod pallet { /// Emits `ItemTransferLocked`. /// /// Weight: `O(1)` + #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::lock_item_transfer())] pub fn lock_item_transfer( origin: OriginFor, @@ -1019,6 +1028,7 @@ pub mod pallet { /// Emits `ItemTransferUnlocked`. /// /// Weight: `O(1)` + #[pallet::call_index(9)] #[pallet::weight(T::WeightInfo::unlock_item_transfer())] pub fn unlock_item_transfer( origin: OriginFor, @@ -1040,6 +1050,7 @@ pub mod pallet { /// Emits `CollectionLocked`. /// /// Weight: `O(1)` + #[pallet::call_index(10)] #[pallet::weight(T::WeightInfo::lock_collection())] pub fn lock_collection( origin: OriginFor, @@ -1061,6 +1072,7 @@ pub mod pallet { /// Emits `OwnerChanged`. /// /// Weight: `O(1)` + #[pallet::call_index(11)] #[pallet::weight(T::WeightInfo::transfer_ownership())] pub fn transfer_ownership( origin: OriginFor, @@ -1085,6 +1097,7 @@ pub mod pallet { /// Emits `TeamChanged`. /// /// Weight: `O(1)` + #[pallet::call_index(12)] #[pallet::weight(T::WeightInfo::set_team())] pub fn set_team( origin: OriginFor, @@ -1112,6 +1125,7 @@ pub mod pallet { /// Emits `OwnerChanged`. /// /// Weight: `O(1)` + #[pallet::call_index(13)] #[pallet::weight(T::WeightInfo::force_collection_owner())] pub fn force_collection_owner( origin: OriginFor, @@ -1133,6 +1147,7 @@ pub mod pallet { /// Emits `CollectionConfigChanged`. /// /// Weight: `O(1)` + #[pallet::call_index(14)] #[pallet::weight(T::WeightInfo::force_collection_config())] pub fn force_collection_config( origin: OriginFor, @@ -1157,6 +1172,7 @@ pub mod pallet { /// Emits `TransferApproved` on success. 
/// /// Weight: `O(1)` + #[pallet::call_index(15)] #[pallet::weight(T::WeightInfo::approve_transfer())] pub fn approve_transfer( origin: OriginFor, @@ -1193,6 +1209,7 @@ pub mod pallet { /// Emits `ApprovalCancelled` on success. /// /// Weight: `O(1)` + #[pallet::call_index(16)] #[pallet::weight(T::WeightInfo::cancel_approval())] pub fn cancel_approval( origin: OriginFor, @@ -1221,6 +1238,7 @@ pub mod pallet { /// Emits `AllApprovalsCancelled` on success. /// /// Weight: `O(1)` + #[pallet::call_index(17)] #[pallet::weight(T::WeightInfo::clear_all_transfer_approvals())] pub fn clear_all_transfer_approvals( origin: OriginFor, @@ -1250,6 +1268,7 @@ pub mod pallet { /// Emits `ItemPropertiesLocked`. /// /// Weight: `O(1)` + #[pallet::call_index(18)] #[pallet::weight(T::WeightInfo::lock_item_properties())] pub fn lock_item_properties( origin: OriginFor, @@ -1292,6 +1311,7 @@ pub mod pallet { /// Emits `AttributeSet`. /// /// Weight: `O(1)` + #[pallet::call_index(19)] #[pallet::weight(T::WeightInfo::set_attribute())] pub fn set_attribute( origin: OriginFor, @@ -1322,6 +1342,7 @@ pub mod pallet { /// Emits `AttributeSet`. /// /// Weight: `O(1)` + #[pallet::call_index(20)] #[pallet::weight(T::WeightInfo::force_set_attribute())] pub fn force_set_attribute( origin: OriginFor, @@ -1351,6 +1372,7 @@ pub mod pallet { /// Emits `AttributeCleared`. /// /// Weight: `O(1)` + #[pallet::call_index(21)] #[pallet::weight(T::WeightInfo::clear_attribute())] pub fn clear_attribute( origin: OriginFor, @@ -1374,6 +1396,7 @@ pub mod pallet { /// - `delegate`: The account to delegate permission to change attributes of the item. /// /// Emits `ItemAttributesApprovalAdded` on success. + #[pallet::call_index(22)] #[pallet::weight(T::WeightInfo::approve_item_attributes())] pub fn approve_item_attributes( origin: OriginFor, @@ -1396,6 +1419,7 @@ pub mod pallet { /// - `delegate`: The previously approved account to remove. /// /// Emits `ItemAttributesApprovalRemoved` on success. 
+ #[pallet::call_index(23)] #[pallet::weight(T::WeightInfo::cancel_item_attributes_approval( witness.account_attributes ))] @@ -1427,6 +1451,7 @@ pub mod pallet { /// Emits `ItemMetadataSet`. /// /// Weight: `O(1)` + #[pallet::call_index(24)] #[pallet::weight(T::WeightInfo::set_metadata())] pub fn set_metadata( origin: OriginFor, @@ -1453,6 +1478,7 @@ pub mod pallet { /// Emits `ItemMetadataCleared`. /// /// Weight: `O(1)` + #[pallet::call_index(25)] #[pallet::weight(T::WeightInfo::clear_metadata())] pub fn clear_metadata( origin: OriginFor, @@ -1480,6 +1506,7 @@ pub mod pallet { /// Emits `CollectionMetadataSet`. /// /// Weight: `O(1)` + #[pallet::call_index(26)] #[pallet::weight(T::WeightInfo::set_collection_metadata())] pub fn set_collection_metadata( origin: OriginFor, @@ -1504,6 +1531,7 @@ pub mod pallet { /// Emits `CollectionMetadataCleared`. /// /// Weight: `O(1)` + #[pallet::call_index(27)] #[pallet::weight(T::WeightInfo::clear_collection_metadata())] pub fn clear_collection_metadata( origin: OriginFor, @@ -1525,6 +1553,7 @@ pub mod pallet { /// ownership transferal. /// /// Emits `OwnershipAcceptanceChanged`. + #[pallet::call_index(28)] #[pallet::weight(T::WeightInfo::set_accept_ownership())] pub fn set_accept_ownership( origin: OriginFor, @@ -1543,6 +1572,7 @@ pub mod pallet { /// - `max_supply`: The maximum number of items a collection could have. /// /// Emits `CollectionMaxSupplySet` event when successful. + #[pallet::call_index(29)] #[pallet::weight(T::WeightInfo::set_collection_max_supply())] pub fn set_collection_max_supply( origin: OriginFor, @@ -1564,6 +1594,7 @@ pub mod pallet { /// - `mint_settings`: The new mint settings. /// /// Emits `CollectionMintSettingsUpdated` event when successful. + #[pallet::call_index(30)] #[pallet::weight(T::WeightInfo::update_mint_settings())] pub fn update_mint_settings( origin: OriginFor, @@ -1591,6 +1622,7 @@ pub mod pallet { /// /// Emits `ItemPriceSet` on success if the price is not `None`. 
/// Emits `ItemPriceRemoved` on success if the price is `None`. + #[pallet::call_index(31)] #[pallet::weight(T::WeightInfo::set_price())] pub fn set_price( origin: OriginFor, @@ -1613,6 +1645,7 @@ pub mod pallet { /// - `bid_price`: The price the sender is willing to pay. /// /// Emits `ItemBought` on success. + #[pallet::call_index(32)] #[pallet::weight(T::WeightInfo::buy_item())] pub fn buy_item( origin: OriginFor, @@ -1631,6 +1664,7 @@ pub mod pallet { /// - `tips`: Tips array. /// /// Emits `TipSent` on every tip transfer. + #[pallet::call_index(33)] #[pallet::weight(T::WeightInfo::pay_tips(tips.len() as u32))] pub fn pay_tips( origin: OriginFor, @@ -1656,6 +1690,7 @@ pub mod pallet { /// after which the swap will expire. /// /// Emits `SwapCreated` on success. + #[pallet::call_index(34)] #[pallet::weight(T::WeightInfo::create_swap())] pub fn create_swap( origin: OriginFor, @@ -1687,6 +1722,7 @@ pub mod pallet { /// - `item`: The item an owner wants to give. /// /// Emits `SwapCancelled` on success. + #[pallet::call_index(35)] #[pallet::weight(T::WeightInfo::cancel_swap())] pub fn cancel_swap( origin: OriginFor, @@ -1709,6 +1745,7 @@ pub mod pallet { /// - `witness_price`: A price that was previously agreed on. /// /// Emits `SwapClaimed` on success. 
+ #[pallet::call_index(36)] #[pallet::weight(T::WeightInfo::claim_swap())] pub fn claim_swap( origin: OriginFor, From f61854336690154685770b9b6f4020a42d602b1f Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Fri, 23 Dec 2022 16:36:17 +0200 Subject: [PATCH 072/101] Update snapshots --- .../pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr | 4 ++-- .../storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index ac5a1f46f8a6b..364eb5e6d5bb1 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 281 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 281 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index e0c1609403c3a..371e90323d9cb 100644 --- 
a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 281 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 281 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` From 553d262bd4975641e60b051cbe93a35a2e3dd636 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Fri, 27 Jan 2023 11:52:32 +0100 Subject: [PATCH 073/101] Refactor nft fractionalisation pallet (#13008) * Refactoring * Make it compile * Add tests * Rename * Rework nfts locking * Update cargo.lock * Connect the latest changes to the runtime-kitchensink * Add benchmarks, fix other issues * Chore * Chore 2 * Chore 3 * Add runtime-benchmarks * Rename * Set metadata * Make fields public * Chore * Created asset shouldn't be sufficient * Add documentation * minor edit to docs * Minor corrections Co-authored-by: lana-shanghai --- Cargo.lock | 1886 +++++++++-------- Cargo.toml | 2 +- bin/node/runtime/Cargo.toml | 7 +- bin/node/runtime/src/lib.rs | 64 +- frame/assets/src/functions.rs | 14 +- frame/assets/src/impl_fungibles.rs | 13 + frame/nft-fractionalisation/README.md | 4 - 
frame/nft-fractionalisation/src/lib.rs | 298 --- frame/nft-fractionalisation/src/mock.rs | 150 -- frame/nft-fractionalisation/src/tests.rs | 16 - .../Cargo.toml | 2 +- frame/nft-fractionalization/README.md | 3 + .../nft-fractionalization/src/benchmarking.rs | 129 ++ frame/nft-fractionalization/src/lib.rs | 359 ++++ frame/nft-fractionalization/src/mock.rs | 206 ++ frame/nft-fractionalization/src/tests.rs | 235 ++ frame/nft-fractionalization/src/types.rs | 31 + frame/nft-fractionalization/src/weights.rs | 86 + .../src/traits/tokens/fungibles/metadata.rs | 5 + .../src/traits/tokens/nonfungibles_v2.rs | 8 + 20 files changed, 2136 insertions(+), 1382 deletions(-) delete mode 100644 frame/nft-fractionalisation/README.md delete mode 100644 frame/nft-fractionalisation/src/lib.rs delete mode 100644 frame/nft-fractionalisation/src/mock.rs delete mode 100644 frame/nft-fractionalisation/src/tests.rs rename frame/{nft-fractionalisation => nft-fractionalization}/Cargo.toml (97%) create mode 100644 frame/nft-fractionalization/README.md create mode 100644 frame/nft-fractionalization/src/benchmarking.rs create mode 100644 frame/nft-fractionalization/src/lib.rs create mode 100644 frame/nft-fractionalization/src/mock.rs create mode 100644 frame/nft-fractionalization/src/tests.rs create mode 100644 frame/nft-fractionalization/src/types.rs create mode 100644 frame/nft-fractionalization/src/weights.rs diff --git a/Cargo.lock b/Cargo.lock index e9d4a2b290591..bf0b225c89cca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,7 +18,7 @@ version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" dependencies = [ - "gimli 0.26.1", + "gimli 0.26.2", ] [[package]] @@ -42,7 +42,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ - "generic-array 0.14.4", + 
"generic-array 0.14.6", ] [[package]] @@ -77,20 +77,29 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.8", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -102,30 +111,30 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.66" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" +checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" [[package]] name = "approx" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "072df7202e63b127ab55acfe16ce97013d5b97bf160489336d3f1840fd78e99e" +checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" dependencies = [ "num-traits", ] [[package]] name = "arbitrary" -version = "1.0.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "698b65a961a9d730fb45b6b0327e20207810c9f61ee421b082b27ba003f49e2b" +checksum = "29d47fbf90d5149a107494b15a7dc8d69b351be2db3bb9691740e88ec17fd880" [[package]] name = "array-bytes" -version = "4.1.0" +version = "4.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a913633b0c922e6b745072795f50d90ebea78ba31a57e2ac8c2fc7b50950949" +checksum = "f52f63c5c1316a16a4b35eaac8b76a98248961a533f061684cb2a7cb0eafb6c6" [[package]] name = "arrayref" @@ -147,17 +156,17 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "asn1_der" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6e24d2cce90c53b948c46271bfb053e4bdc2db9b5d3f65e20f8cf28a1b7fc3" +checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" [[package]] name = "assert_cmd" -version = "2.0.6" +version = "2.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba45b8163c49ab5f972e59a8a5a03b6d2972619d486e19ec9fe744f7c2753d3c" +checksum = "fa3d466004a8b4cb1bc34044240a2fd29d17607e2e3bd613eb44fd48e8100da3" dependencies = [ - "bstr 1.0.1", + "bstr 1.1.0", "doc-comment", "predicates", "predicates-core", @@ -173,9 +182,9 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-channel" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" +checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" dependencies = [ "concurrent-queue", "event-listener", @@ -184,76 +193,68 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb877970c7b440ead138f6321a3b5395d6061183af779340b65e20c0fede9146" +checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" dependencies = [ + "async-lock", "async-task", "concurrent-queue", "fastrand", "futures-lite", - "once_cell", - "vec-arena", + "slab", ] [[package]] name = "async-global-executor" -version 
= "2.0.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" +checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" dependencies = [ "async-channel", "async-executor", "async-io", - "async-mutex", + "async-lock", "blocking", "futures-lite", - "num_cpus", "once_cell", ] [[package]] name = "async-io" -version = "1.6.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a811e6a479f2439f0c04038796b5cfb3d2ad56c230e0f2d3f7b04d68cfee607b" +checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" dependencies = [ + "async-lock", + "autocfg", "concurrent-queue", "futures-lite", "libc", "log", - "once_cell", "parking", "polling", "slab", "socket2", "waker-fn", - "winapi", + "windows-sys 0.42.0", ] [[package]] name = "async-lock" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6" -dependencies = [ - "event-listener", -] - -[[package]] -name = "async-mutex" -version = "1.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" +checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" dependencies = [ "event-listener", + "futures-lite", ] [[package]] name = "async-stream" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "171374e7e3b2504e0e5236e3b59260560f9fe94bfe9ac39ba5e4e929c5590625" +checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" dependencies = [ "async-stream-impl", "futures-core", @@ -261,9 +262,9 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.2" +version = "0.3.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" +checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" dependencies = [ "proc-macro2", "quote", @@ -272,15 +273,15 @@ dependencies = [ [[package]] name = "async-task" -version = "4.0.3" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" +checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" [[package]] name = "async-trait" -version = "0.1.59" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364" +checksum = "677d1d8ab452a3936018a687b20e6f7cf5363d713b732b8884001317b0e48aa3" dependencies = [ "proc-macro2", "quote", @@ -289,9 +290,9 @@ dependencies = [ [[package]] name = "asynchronous-codec" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690" +checksum = "06a0daa378f5fd10634e44b0a29b2a87b890657658e072a30d6f26e57ddee182" dependencies = [ "bytes", "futures-sink", @@ -312,7 +313,7 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", "winapi", ] @@ -333,16 +334,16 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide 0.6.2", + "miniz_oxide", "object 0.30.0", "rustc-demangle", ] [[package]] name = "base-x" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" 
[[package]] name = "base16ct" @@ -358,21 +359,21 @@ checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64ct" -version = "1.5.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" +checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" [[package]] name = "beef" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bed554bd50246729a1ec158d08aa3235d1b69d94ad120ebe187e28894787e736" +checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" dependencies = [ "serde", ] @@ -489,9 +490,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitvec" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", @@ -501,11 +502,11 @@ dependencies = [ [[package]] name = "blake2" -version = "0.10.4" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.3", + "digest 0.10.6", ] [[package]] @@ -516,7 +517,7 @@ checksum = "72936ee4afc7f8f736d1c38383b56480b5497b4617b4a77bdbf1d2ababc76127" dependencies = [ "arrayref", "arrayvec 0.7.2", - 
"constant_time_eq", + "constant_time_eq 0.1.5", ] [[package]] @@ -527,20 +528,20 @@ checksum = "db539cc2b5f6003621f1cd9ef92d7ded8ea5232c7de0f9faa2de251cd98730d4" dependencies = [ "arrayref", "arrayvec 0.7.2", - "constant_time_eq", + "constant_time_eq 0.1.5", ] [[package]] name = "blake3" -version = "1.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08e53fc5a564bb15bfe6fae56bd71522205f1f91893f9c0116edad6496c183f" +checksum = "42ae2468a89544a466886840aa467a25b766499f4f04bf7d9fcd10ecee9fccef" dependencies = [ "arrayref", "arrayvec 0.7.2", "cc", "cfg-if", - "constant_time_eq", + "constant_time_eq 0.2.4", ] [[package]] @@ -561,16 +562,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", ] [[package]] name = "block-buffer" -version = "0.10.0" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1d36a02058e76b040de25a4464ba1c80935655595b661505c8b39b664828b95" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", ] [[package]] @@ -584,16 +585,16 @@ dependencies = [ [[package]] name = "blocking" -version = "1.0.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e170dbede1f740736619b776d7251cb1b9095c435c34d8ca9f57fcd2f335e9" +checksum = "3c67b173a56acffd6d2326fb7ab938ba0b00a71480e14902b2591c87bc5741e8" dependencies = [ "async-channel", + "async-lock", "async-task", "atomic-waker", "fastrand", "futures-lite", - "once_cell", ] [[package]] @@ -604,9 +605,9 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" -version = "0.2.15" +version = "0.2.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" dependencies = [ "lazy_static", "memchr", @@ -616,9 +617,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fca0852af221f458706eb0725c03e4ed6c46af9ac98e6a689d5e634215d594dd" +checksum = "b45ea9b00a7b3f2988e9a65ad3917e62123c38dba709b666506207be96d1790b" dependencies = [ "memchr", "once_cell", @@ -637,15 +638,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.6.1" +version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" [[package]] name = "byte-slice-cast" -version = "1.0.0" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65c1bf4a04a88c54f589125563643d773f3254b5c38571395e2b591c693bbc81" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "byte-tools" @@ -661,9 +662,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" [[package]] name = "bzip2-sys" @@ -676,26 +677,20 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "cache-padded" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" - [[package]] name = "camino" -version = 
"1.0.4" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4648c6d00a709aa069a236adcaae4f605a6241c72bf5bee79331a4b625921a9" +checksum = "88ad0e1e3e88dd237a156ab9f571021b8a158caa0ae44b1968a241efb5144c1e" dependencies = [ "serde", ] [[package]] name = "cargo-platform" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0226944a63d1bf35a3b5f948dd7c59e263db83695c9e8bffc4037de02e30f1d7" +checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" dependencies = [ "serde", ] @@ -708,25 +703,22 @@ checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" dependencies = [ "camino", "cargo-platform", - "semver 1.0.4", + "semver 1.0.16", "serde", "serde_json", ] [[package]] name = "cast" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b9434b9a5aa1450faa3f9cb14ea0e8c53bb5d2b3c1bfd1ab4fc03e9f33fbfb0" -dependencies = [ - "rustc_version 0.2.3", -] +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.73" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" dependencies = [ "jobserver", ] @@ -791,7 +783,7 @@ name = "chain-spec-builder" version = "2.0.0" dependencies = [ "ansi_term", - "clap 4.0.11", + "clap 4.0.32", "node-cli", "rand 0.8.5", "sc-chain-spec", @@ -802,14 +794,16 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" dependencies = [ - "libc", + 
"iana-time-zone", + "js-sys", "num-integer", "num-traits", "time", + "wasm-bindgen", "winapi", ] @@ -832,7 +826,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", ] [[package]] @@ -846,9 +840,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.2.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "853eda514c284c2287f4bf20ae614f8781f40a81d32ecda6e91449304dfe077c" +checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" dependencies = [ "glob", "libc", @@ -868,14 +862,14 @@ dependencies = [ [[package]] name = "clap" -version = "4.0.11" +version = "4.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ed45cc2c62a3eff523e718d8576ba762c83a3146151093283ac62ae11933a73" +checksum = "a7db700bc935f9e43e88d00b0850dae18a63773cfbec6d8e070fccf7fef89a39" dependencies = [ - "atty", "bitflags", "clap_derive", "clap_lex", + "is-terminal", "once_cell", "strsim", "termcolor", @@ -883,18 +877,18 @@ dependencies = [ [[package]] name = "clap_complete" -version = "4.0.5" +version = "4.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b0fba905b035a30d25c1b585bf1171690712fbb0ad3ac47214963aa4acc36c" +checksum = "10861370d2ba66b0f5989f83ebf35db6421713fd92351790e7fdd6c36774c56b" dependencies = [ - "clap 4.0.11", + "clap 4.0.32", ] [[package]] name = "clap_derive" -version = "4.0.10" +version = "4.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db342ce9fda24fb191e2ed4e102055a4d381c1086a06630174cd8da8d5d917ce" +checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014" dependencies = [ "heck", "proc-macro-error", @@ -924,9 +918,9 @@ dependencies = [ [[package]] name = "comfy-table" -version = "6.0.0" +version = 
"6.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "121d8a5b0346092c18a4b2fd6f620d7a06f0eb7ac0a45860939a0884bc579c56" +checksum = "e621e7e86c46fd8a14c32c6ae3cb95656621b4743a27d0cffedb831d46e7ad21" dependencies = [ "strum", "strum_macros", @@ -935,18 +929,18 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "1.2.2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" +checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" dependencies = [ - "cache-padded", + "crossbeam-utils", ] [[package]] name = "const-oid" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" +checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" [[package]] name = "constant_time_eq" @@ -954,6 +948,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "constant_time_eq" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3ad85c1f65dc7b37604eb0e89748faf0b9653065f2a8ef69f96a687ec1e9279" + [[package]] name = "core-foundation" version = "0.9.3" @@ -981,43 +981,36 @@ dependencies = [ [[package]] name = "cpp_demangle" -version = "0.3.2" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44919ecaf6f99e8e737bc239408931c9a01e9a6c74814fee8242dd2506b65390" +checksum = "eeaa953eaad386a53111e47172c2fedba671e5684c8dd601a5f474f4f118710f" dependencies = [ "cfg-if", - "glob", ] [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] -[[package]] -name = "cpuid-bool" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" - [[package]] name = "cranelift-bforest" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b27bbd3e6c422cf6282b047bcdd51ecd9ca9f3497a3be0132ffa08e509b824b0" +checksum = "52056f6d0584484b57fa6c1a65c1fcb15f3780d8b6a758426d9e3084169b2ddd" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "872f5d4557a411b087bd731df6347c142ae1004e6467a144a7e33662e5715a01" +checksum = "18fed94c8770dc25d01154c3ffa64ed0b3ba9d583736f305fed7beebe5d9cf74" dependencies = [ "arrayvec 0.7.2", "bumpalo", @@ -1026,7 +1019,7 @@ dependencies = [ "cranelift-codegen-shared", "cranelift-entity", "cranelift-isle", - "gimli 0.26.1", + "gimli 0.26.2", "log", "regalloc2", "smallvec", @@ -1035,33 +1028,33 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b49fdebb29c62c1fc4da1eeebd609e9d530ecde24a9876def546275f73a244" +checksum = "1c451b81faf237d11c7e4f3165eeb6bac61112762c5cfe7b4c0fb7241474358f" dependencies = [ "cranelift-codegen-shared", ] [[package]] name = "cranelift-codegen-shared" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc0c091e2db055d4d7f6b7cec2d2ead286bcfaea3357c6a52c2a2613a8cb5ac" +checksum = "e7c940133198426d26128f08be2b40b0bd117b84771fd36798969c4d712d81fc" [[package]] name = "cranelift-entity" -version = "0.88.0" 
+version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "354a9597be87996c9b278655e68b8447f65dd907256855ad773864edee8d985c" +checksum = "87a0f1b2fdc18776956370cf8d9b009ded3f855350c480c1c52142510961f352" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cd8dd3fb8b82c772f4172e87ae1677b971676fffa7c4e3398e3047e650a266b" +checksum = "34897538b36b216cc8dd324e73263596d51b8cf610da6498322838b2546baf8a" dependencies = [ "cranelift-codegen", "log", @@ -1071,15 +1064,15 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b82527802b1f7d8da288adc28f1dc97ea52943f5871c041213f7b5035ac698a7" +checksum = "1b2629a569fae540f16a76b70afcc87ad7decb38dc28fa6c648ac73b51e78470" [[package]] name = "cranelift-native" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c30ba8b910f1be023af0c39109cb28a8809734942a6b3eecbf2de8993052ea5e" +checksum = "20937dab4e14d3e225c5adfc9c7106bafd4ac669bdb43027b911ff794c6fb318" dependencies = [ "cranelift-codegen", "libc", @@ -1088,9 +1081,9 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "776a8916d201894aca9637a20814f1e11abc62acd5cfbe0b4eb2e63922756971" +checksum = "80fc2288957a94fd342a015811479de1837850924166d1f1856d8406e6f3609b" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -1104,18 +1097,18 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = 
"b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] [[package]] name = "criterion" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10" +checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", @@ -1141,9 +1134,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57" +checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" dependencies = [ "cast", "itertools", @@ -1151,9 +1144,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.0" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1161,9 +1154,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -1172,25 +1165,24 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.5" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" dependencies = [ + "autocfg", "cfg-if", "crossbeam-utils", - "lazy_static", - "memoffset", + 
"memoffset 0.7.1", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.8" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" +checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" dependencies = [ "cfg-if", - "lazy_static", ] [[package]] @@ -1201,23 +1193,23 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f2b443d17d49dad5ef0ede301c3179cc923b8822f3393b4d2c28c269dd4a122" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ - "generic-array 0.14.4", - "rand_core 0.6.2", + "generic-array 0.14.6", + "rand_core 0.6.4", "subtle", "zeroize", ] [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", "typenum", ] @@ -1227,7 +1219,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", "subtle", ] @@ -1237,7 +1229,7 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", "subtle", ] @@ -1247,7 +1239,7 @@ version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" dependencies = [ - "bstr 0.2.15", + "bstr 0.2.17", "csv-core", "itoa 0.4.8", "ryu", @@ -1265,9 +1257,9 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.19" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8f45d9ad417bcef4817d614a501ab55cdd96a6fdb24f49aab89a54acfd66b19" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ "quote", "syn", @@ -1284,9 +1276,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "2.1.2" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "434e1720189a637d44fe464f4df1e6eb900b4835255b14354497c78af37d9bb8" +checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" dependencies = [ "byteorder", "digest 0.8.1", @@ -1297,9 +1289,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.0.2" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f627126b946c25a4638eec0ea634fc52506dea98db118aae985118ce7c3d723f" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", "digest 0.9.0", @@ -1310,22 +1302,23 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-pre.1" +version = "4.0.0-pre.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4033478fbf70d6acf2655ac70da91ee65852d69daf7a67bf7a2f518fb47aafcf" +checksum = "67bc65846be335cb20f4e52d49a437b773a2c1fdb42b19fc84e79e6f6771536f" dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.6.2", + "cfg-if", + "fiat-crypto", + "packed_simd_2", + "platforms 3.0.2", "subtle", "zeroize", ] [[package]] name = "cxx" -version = "1.0.80" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6b7d4e43b25d3c994662706a1d4fcfc32aaa6afd287502c111b237093bb23f3a" +checksum = "5add3fc1717409d029b20c5b6903fc0c0b02fa6741d820054f4a2efa5e5816fd" dependencies = [ "cc", "cxxbridge-flags", @@ -1335,9 +1328,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.80" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f8829ddc213e2c1368e51a2564c552b65a8cb6a28f31e576270ac81d5e5827" +checksum = "b4c87959ba14bc6fbc61df77c3fcfe180fc32b93538c4f1031dd802ccb5f2ff0" dependencies = [ "cc", "codespan-reporting", @@ -1350,15 +1343,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.80" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e72537424b474af1460806647c41d4b6d35d09ef7fe031c5c2fa5766047cc56a" +checksum = "69a3e162fde4e594ed2b07d0f83c6c67b745e7f28ce58c6df5e6b6bef99dfb59" [[package]] name = "cxxbridge-macro" -version = "1.0.80" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "309e4fb93eed90e1e14bea0da16b209f81813ba9fc7830c20ed151dd7bc0a4d7" +checksum = "3e7e2adeb6a0d4a282e581096b06e1791532b7d576dcde5ccd9382acf55db8e6" dependencies = [ "proc-macro2", "quote", @@ -1367,15 +1360,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" [[package]] name = "data-encoding-macro" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a94feec3d2ba66c0b6621bca8bc6f68415b1e5c69af3586fdd0af9fd9f29b17" +checksum = "86927b7cd2fe88fa698b87404b287ab98d1a0063a34071d92e575b72d3029aca" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1383,9 +1376,9 @@ dependencies = [ 
[[package]] name = "data-encoding-macro-internal" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f83e699727abca3c56e187945f303389590305ab2f0185ea445aa66e8d5f2a" +checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" dependencies = [ "data-encoding", "syn", @@ -1393,9 +1386,9 @@ dependencies = [ [[package]] name = "der" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ "const-oid", "zeroize", @@ -1414,9 +1407,9 @@ dependencies = [ [[package]] name = "diff" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" [[package]] name = "difflib" @@ -1439,16 +1432,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", ] [[package]] name = "digest" -version = "0.10.3" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.0", + "block-buffer 0.10.3", "crypto-common", "subtle", ] @@ -1474,9 +1467,9 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" +checksum = 
"1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" dependencies = [ "libc", "redox_users", @@ -1496,9 +1489,9 @@ dependencies = [ [[package]] name = "dissimilar" -version = "1.0.2" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4b29f4b9bb94bf267d57269fd0706d343a160937108e9619fe380645428abb" +checksum = "bd5f0c7e4bd266b8ab2550e6238d2e74977c23c15536ac7be45e9c95e2e3fbbb" [[package]] name = "dns-parser" @@ -1530,9 +1523,9 @@ checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" [[package]] name = "dtoa" -version = "1.0.2" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5caaa75cbd2b960ff1e5392d2cfb1f44717fffe12fc1f32b7b5d1267f99732a6" +checksum = "c00704156a7de8df8da0911424e30c2049957b0a714542a44e05fe693dd85313" [[package]] name = "dyn-clonable" @@ -1557,15 +1550,15 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.5" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e50f3adc76d6a43f5ed73b698a87d0760ca74617f60f7c3b879003536fdd28" +checksum = "c9b0705efd4599c15a38151f4721f7bc388306f61084d3bfd50bd07fbca5cb60" [[package]] name = "ecdsa" -version = "0.14.7" +version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85789ce7dfbd0f0624c07ef653a08bb2ebf43d3e16531361f46d36dd54334fed" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ "der", "elliptic-curve", @@ -1588,11 +1581,11 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "curve25519-dalek 3.0.2", + "curve25519-dalek 3.2.0", "ed25519", "rand 0.7.3", "serde", - "sha2 0.9.8", + "sha2 0.9.9", "zeroize", ] @@ -1602,19 +1595,19 @@ version = "3.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7c24f403d068ad0b359e577a77f92392118be3f3c927538f2bb544a5ecd828c6" dependencies = [ - "curve25519-dalek 3.0.2", - "hashbrown 0.12.3", + "curve25519-dalek 3.2.0", + "hashbrown", "hex", - "rand_core 0.6.2", - "sha2 0.9.8", + "rand_core 0.6.4", + "sha2 0.9.9", "zeroize", ] [[package]] name = "either" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "elliptic-curve" @@ -1625,11 +1618,11 @@ dependencies = [ "base16ct", "crypto-bigint", "der", - "digest 0.10.3", + "digest 0.10.6", "ff", - "generic-array 0.14.4", + "generic-array 0.14.6", "group", - "rand_core 0.6.2", + "rand_core 0.6.4", "sec1", "subtle", "zeroize", @@ -1669,9 +1662,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.0" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" dependencies = [ "atty", "humantime", @@ -1682,9 +1675,9 @@ dependencies = [ [[package]] name = "environmental" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" +checksum = "e48c92028aaa870e83d51c64e5d4e0b6981b360c522198c23959f219a4e1b15b" [[package]] name = "errno" @@ -1699,19 +1692,19 @@ dependencies = [ [[package]] name = "errno-dragonfly" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" dependencies 
= [ - "gcc", + "cc", "libc", ] [[package]] name = "event-listener" -version = "2.5.1" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "exit-future" @@ -1736,9 +1729,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] @@ -1754,14 +1747,20 @@ dependencies = [ [[package]] name = "ff" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core 0.6.2", + "rand_core 0.6.4", "subtle", ] +[[package]] +name = "fiat-crypto" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a214f5bb88731d436478f3ae1f8a277b62124089ba9fb67f4f93fb100ef73c90" + [[package]] name = "file-per-thread-logger" version = "0.1.5" @@ -1774,14 +1773,14 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.16" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0408e2626025178a6a7f7ffc05a25bc47103229f19c113755de7bf63816290c" +checksum = "4e884668cd0c7480504233e951174ddc3b382f7c2666e3b7310b5c4e7b0c37f9" dependencies = [ "cfg-if", "libc", "redox_syscall", - "winapi", + "windows-sys 0.42.0", ] [[package]] @@ -1815,21 +1814,19 @@ dependencies = [ [[package]] name = "fixedbitset" -version = "0.4.0" +version = "0.4.2" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.20" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ - "cfg-if", "crc32fast", - "libc", "libz-sys", - "miniz_oxide 0.4.4", + "miniz_oxide", ] [[package]] @@ -1856,19 +1853,18 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] [[package]] name = "fragile" -version = "1.2.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85dcb89d2b10c5f6133de2efd8c11959ce9dbb46a2f7a4cab208c4eeda6ce1ab" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" @@ -1902,7 +1898,7 @@ dependencies = [ "Inflector", "array-bytes", "chrono", - "clap 4.0.11", + "clap 4.0.32", "comfy-table", "frame-benchmarking", "frame-support", @@ -1979,7 +1975,7 @@ dependencies = [ name = "frame-election-solution-type-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.0.11", + "clap 4.0.32", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-support", @@ -2234,9 +2230,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" dependencies = [ "futures-channel", "futures-core", @@ -2249,9 +2245,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" dependencies = [ "futures-core", "futures-sink", @@ -2259,15 +2255,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" dependencies = [ "futures-core", "futures-task", @@ -2277,15 +2273,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" [[package]] name = "futures-lite" -version = "1.11.3" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4481d0cd0de1d204a4fa55e7d45f07b1d958abcb06714b3446438e2eff695fb" +checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" dependencies = [ "fastrand", "futures-core", @@ -2298,9 +2294,9 @@ dependencies 
= [ [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" dependencies = [ "proc-macro2", "quote", @@ -2309,9 +2305,9 @@ dependencies = [ [[package]] name = "futures-rustls" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e01fe9932a224b72b45336d96040aa86386d674a31d0af27d800ea7bc8ca97fe" +checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" dependencies = [ "futures-io", "rustls", @@ -2320,15 +2316,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" [[package]] name = "futures-timer" @@ -2338,9 +2334,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" dependencies = [ "futures-channel", "futures-core", @@ -2363,12 +2359,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "gcc" -version = "0.3.55" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" - [[package]] name = "generate-bags" version = "4.0.0-dev" @@ -2393,9 +2383,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -2424,13 +2414,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] @@ -2445,9 +2435,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" dependencies = [ "fallible-iterator", "indexmap", @@ -2462,9 +2452,9 @@ checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" [[package]] name = "git2" -version = "0.14.2" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3826a6e0e2215d7a41c2bfc7c9244123969273f3476b939a226aac0ab56e9e3c" +checksum = "d0155506aab710a86160ddb504a480d2964d7ab5b9e62419be69e0032bc5931c" dependencies = [ "bitflags", "libc", @@ -2481,12 +2471,12 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "globset" -version = "0.4.6" 
+version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c152169ef1e421390738366d2f796655fec62621dabbd0fd476f905934061e4a" +checksum = "0a1e17342619edbc21a964c2afbeb6c820c6a2560032872f397bb97ea127bd0a" dependencies = [ "aho-corasick", - "bstr 0.2.15", + "bstr 0.2.17", "fnv", "log", "regex", @@ -2494,20 +2484,20 @@ dependencies = [ [[package]] name = "group" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff", - "rand_core 0.6.2", + "rand_core 0.6.4", "subtle", ] [[package]] name = "h2" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" dependencies = [ "bytes", "fnv", @@ -2518,21 +2508,21 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.4", + "tokio-util", "tracing", ] [[package]] name = "half" -version = "1.7.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62aca2aba2d62b4a7f5b33f3712cb1b0692779a56fb510499d5c0aa594daeaf3" +checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "handlebars" -version = "4.3.5" +version = "4.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433e4ab33f1213cdc25b5fa45c76881240cfe79284cf2b395e8b9e312a30a2fd" +checksum = "035ef95d03713f2c347a72547b7cd38cbc9af7cd51e6099fb62d586d4a6dee3a" dependencies = [ "log", "pest", @@ -2557,12 +2547,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" - [[package]] name = "hashbrown" version = "0.12.3" @@ -2580,9 +2564,18 @@ checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" [[package]] name = "hermit-abi" -version = "0.1.18" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hermit-abi" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" dependencies = [ "libc", ] @@ -2619,7 +2612,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.3", + "digest 0.10.6", ] [[package]] @@ -2629,7 +2622,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ "digest 0.9.0", - "generic-array 0.14.4", + "generic-array 0.14.6", "hmac 0.8.1", ] @@ -2664,7 +2657,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.4", + "itoa 1.0.5", ] [[package]] @@ -2692,9 +2685,9 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "humantime" @@ -2704,9 +2697,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" 
[[package]] name = "hyper" -version = "0.14.22" +version = "0.14.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abfba89e19b959ca163c7752ba59d737c1ceea53a5d31a149c805446fc958064" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" dependencies = [ "bytes", "futures-channel", @@ -2717,7 +2710,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.4", + "itoa 1.0.5", "pin-project-lite 0.2.9", "socket2", "tokio", @@ -2728,9 +2721,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", @@ -2741,6 +2734,30 @@ dependencies = [ "tokio-rustls", ] +[[package]] +name = "iana-time-zone" +version = "0.1.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + [[package]] name = "idna" version = "0.2.3" @@ -2752,6 +2769,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" version = "0.7.0" @@ -2811,12 +2838,12 @@ dependencies = [ [[package]] 
name = "indexmap" -version = "1.8.0" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", - "hashbrown 0.11.2", + "hashbrown", "serde", ] @@ -2846,9 +2873,19 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "0.7.2" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24c3f4eff5495aee4c0399d7b6a0dc2b6e81be84242ffbfcf253ebacccc1d0cb" +checksum = "59ce5ef949d49ee85593fc4d3f3f95ad61657076395cbbce23e2121fc5542074" + +[[package]] +name = "io-lifetimes" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46112a93252b123d31a119a8d1a1ac19deac4fac6e0e8b0df58f0d4e5870e63c" +dependencies = [ + "libc", + "windows-sys 0.42.0", +] [[package]] name = "ip_network" @@ -2858,9 +2895,9 @@ checksum = "aa2f047c0a98b2f299aa5d6d7088443570faae494e9ae1305e48be000c9e0eb1" [[package]] name = "ipconfig" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" +checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" dependencies = [ "socket2", "widestring", @@ -2870,9 +2907,21 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.5.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" +checksum = "11b0d96e660696543b251e58030cf9787df56da39dab19ad60eae7353040917e" + +[[package]] +name = "is-terminal" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dfb6c8100ccc63462345b67d1bbc3679177c75ee4bf59bf29c8b1d110b8189" +dependencies = [ + "hermit-abi 0.2.6", + "io-lifetimes 
1.0.3", + "rustix 0.36.6", + "windows-sys 0.42.0", +] [[package]] name = "itertools" @@ -2891,24 +2940,24 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" [[package]] name = "jobserver" -version = "0.1.21" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" +checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.54" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1866b355d9c878e5e607473cbe3f63282c0b7aad2db1dbebf55076c686918254" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] @@ -2943,7 +2992,7 @@ dependencies = [ "thiserror", "tokio", "tokio-rustls", - "tokio-util 0.7.4", + "tokio-util", "tracing", "webpki-roots", ] @@ -3006,7 +3055,7 @@ dependencies = [ "soketto", "tokio", "tokio-stream", - "tokio-util 0.7.4", + "tokio-util", "tower", "tracing", ] @@ -3039,21 +3088,24 @@ dependencies = [ [[package]] name = "k256" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3636d281d46c3b64182eb3a0a42b7b483191a2ecc3f05301fa67403f7c9bc949" +checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ "cfg-if", "ecdsa", "elliptic-curve", - "sha2 0.10.2", + "sha2 0.10.6", ] [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +dependencies = [ + "cpufeatures", +] [[package]] name = "keccak-hasher" @@ -3108,7 +3160,7 @@ dependencies = [ "pallet-message-queue", "pallet-mmr", "pallet-multisig", - "pallet-nft-fractionalisation", + "pallet-nft-fractionalization", "pallet-nfts", "pallet-nis", "pallet-nomination-pools", @@ -3209,21 +3261,21 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "leb128" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" +checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.126" +version = "0.2.139" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" [[package]] name = "libgit2-sys" -version = "0.13.2+1.4.2" +version = "0.13.4+1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a42de9a51a5c12e00fc0e4ca6bc2ea43582fc6418488e8f615e905d886f258b" +checksum = "d0fa6563431ede25f5cc7f6d803c6afbc1c5d3ad3d4925d12c882bf2b526f5d1" dependencies = [ "cc", "libc", @@ -3233,9 +3285,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.0" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if", "winapi", @@ -3243,9 +3295,15 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.1" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" +checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" + +[[package]] +name = "libm" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" [[package]] name = "libp2p" @@ -3256,7 +3314,7 @@ dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.3", + "getrandom 0.2.8", "instant", "lazy_static", "libp2p-core", @@ -3306,7 +3364,7 @@ dependencies = [ "prost-build", "rand 0.8.5", "rw-stream-sink", - "sha2 0.10.2", + "sha2 0.10.6", "smallvec", "thiserror", "unsigned-varint", @@ -3369,7 +3427,7 @@ dependencies = [ "prost", "prost-build", "rand 0.8.5", - "sha2 0.10.2", + "sha2 0.10.6", "smallvec", "thiserror", "uint", @@ -3436,7 +3494,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "048155686bd81fe6cb5efdef0c6290f25ad32a0a42e8f4f72625cf6a505a206f" dependencies = [ "bytes", - "curve25519-dalek 3.0.2", + "curve25519-dalek 3.2.0", "futures", "lazy_static", "libp2p-core", @@ -3444,7 +3502,7 @@ dependencies = [ "prost", "prost-build", "rand 0.8.5", - "sha2 0.10.2", + "sha2 0.10.6", "snow", "static_assertions", "x25519-dalek", @@ -3567,9 +3625,9 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.41.0" +version = "0.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30f079097a21ad017fc8139460630286f02488c8c13b26affb46623aa20d8845" +checksum = "0d6874d66543c4f7e26e3b8ca9a6bead351563a13ab4fafd43c7927f7c0d6c12" dependencies = [ "futures", "libp2p-core", @@ -3596,9 +3654,9 @@ dependencies = [ [[package]] name = "libsecp256k1" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" +checksum = 
"95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" dependencies = [ "arrayref", "base64", @@ -3609,7 +3667,7 @@ dependencies = [ "libsecp256k1-gen-genmult", "rand 0.8.5", "serde", - "sha2 0.9.8", + "sha2 0.9.9", "typenum", ] @@ -3644,9 +3702,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.2" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" +checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" dependencies = [ "cc", "libc", @@ -3656,18 +3714,18 @@ dependencies = [ [[package]] name = "link-cplusplus" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" +checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" dependencies = [ "cc", ] [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linked_hash_set" @@ -3694,6 +3752,12 @@ version = "0.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d" +[[package]] +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + [[package]] name = "lite-json" version = "0.2.0" @@ -3714,10 +3778,11 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.6" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b" +checksum 
= "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ + "autocfg", "scopeguard", ] @@ -3736,7 +3801,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" dependencies = [ - "hashbrown 0.12.3", + "hashbrown", ] [[package]] @@ -3803,48 +3868,57 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "matrixmultiply" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a8a15b776d9dfaecd44b03c5828c2199cddff5247215858aac14624f8d6b741" +checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84" dependencies = [ "rawpointer", ] [[package]] name = "memchr" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memfd" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "480b5a5de855d11ff13195950bdc8b98b5e942ef47afc447f6615cdcc4e15d80" +checksum = "b20a59d985586e4a5aef64564ac77299f8586d8be6cf9106a5a40207e8908efb" dependencies = [ - "rustix", + "rustix 0.36.6", ] [[package]] name = "memmap2" -version = "0.5.0" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4647a11b578fead29cdbb34d4adef8dd3dc35b876c9c6d5240d83f205abfe96e" +checksum = "4b182332558b18d807c4ce1ca8ca983b34c3ee32765e47b3f0f69b90355cc1dc" dependencies = [ "libc", ] [[package]] name = 
"memoffset" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] + +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" dependencies = [ "autocfg", ] @@ -3856,7 +3930,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e0c7cba9ce19ac7ffd2053ac9f49843bbd3f4318feedfd74e85c19d5fb0ba66" dependencies = [ "hash-db", - "hashbrown 0.12.3", + "hashbrown", ] [[package]] @@ -3883,16 +3957,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" -dependencies = [ - "adler", - "autocfg", -] - [[package]] name = "miniz_oxide" version = "0.6.2" @@ -3904,14 +3968,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.36.1", + "windows-sys 0.42.0", ] [[package]] @@ -3955,9 +4019,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2be9a9090bc1cac2930688fa9478092a64c6a92ddc6ae0692d46b37d9cab709" +checksum = 
"50e4a1c770583dac7ab5e2f6c139153b783a53a1bbee9729613f193e59828326" dependencies = [ "cfg-if", "downcast", @@ -3970,9 +4034,9 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d702a0530a0141cf4ed147cf5ec7be6f2c187d4e37fcbefc39cf34116bfe8f" +checksum = "832663583d5fa284ca8810bf7015e46c9fff9622d3cf34bd1eea5003fec06dd0" dependencies = [ "cfg-if", "proc-macro2", @@ -4011,17 +4075,17 @@ dependencies = [ [[package]] name = "multihash" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3db354f401db558759dfc1e568d010a5d4146f4d3f637be1275ec4a3cf09689" +checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" dependencies = [ "blake2b_simd", "blake2s_simd", "blake3", "core2", - "digest 0.10.3", + "digest 0.10.6", "multihash-derive", - "sha2 0.10.2", + "sha2 0.10.6", "sha3", "unsigned-varint", ] @@ -4042,15 +4106,15 @@ dependencies = [ [[package]] name = "multimap" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "multistream-select" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bc41247ec209813e2fd414d6e16b9d94297dacf3cd613fa6ef09cd4d9755c10" +checksum = "c8552ab875c1313b97b8d20cb857b9fd63e2d1d6a0a1b53ce9821e575405f27a" dependencies = [ "bytes", "futures", @@ -4166,22 +4230,22 @@ dependencies = [ [[package]] name = "nix" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" +checksum = 
"8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c" dependencies = [ "bitflags", "cc", "cfg-if", "libc", - "memoffset", + "memoffset 0.6.5", ] [[package]] name = "nix" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" +checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ "bitflags", "cfg-if", @@ -4193,7 +4257,7 @@ name = "node-bench" version = "0.9.0-dev" dependencies = [ "array-bytes", - "clap 4.0.11", + "clap 4.0.32", "derive_more", "fs_extra", "futures", @@ -4230,7 +4294,7 @@ version = "3.0.0-dev" dependencies = [ "array-bytes", "assert_cmd", - "clap 4.0.11", + "clap 4.0.32", "clap_complete", "criterion", "frame-benchmarking-cli", @@ -4240,7 +4304,7 @@ dependencies = [ "jsonrpsee", "kitchensink-runtime", "log", - "nix 0.23.1", + "nix 0.23.2", "node-executor", "node-inspect", "node-primitives", @@ -4252,7 +4316,7 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "parity-scale-codec", - "platforms", + "platforms 2.0.0", "rand 0.8.5", "regex", "sc-authority-discovery", @@ -4305,7 +4369,7 @@ dependencies = [ "substrate-rpc-client", "tempfile", "tokio", - "tokio-util 0.7.4", + "tokio-util", "try-runtime-cli", "wait-timeout", ] @@ -4350,7 +4414,7 @@ dependencies = [ name = "node-inspect" version = "0.9.0-dev" dependencies = [ - "clap 4.0.11", + "clap 4.0.32", "parity-scale-codec", "sc-cli", "sc-client-api", @@ -4409,7 +4473,7 @@ dependencies = [ name = "node-runtime-generate-bags" version = "3.0.0" dependencies = [ - "clap 4.0.11", + "clap 4.0.32", "generate-bags", "kitchensink-runtime", ] @@ -4418,7 +4482,7 @@ dependencies = [ name = "node-template" version = "4.0.0-dev" dependencies = [ - "clap 4.0.11", + "clap 4.0.32", "frame-benchmarking", "frame-benchmarking-cli", "frame-system", @@ -4537,13 +4601,12 @@ checksum = 
"2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" -version = "7.1.0" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" +checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" dependencies = [ "memchr", "minimal-lexical", - "version_check", ] [[package]] @@ -4575,28 +4638,28 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26873667bbbb7c5182d4a37c1add32cdf09f841af72da53318fdb81543c15085" +checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" dependencies = [ "num-traits", ] [[package]] name = "num-format" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b862ff8df690cf089058c98b183676a7ed0f974cc08b426800093227cbff3b" +checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" dependencies = [ "arrayvec 0.7.2", - "itoa 1.0.4", + "itoa 1.0.5", ] [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", @@ -4616,21 +4679,21 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", - "libm", + "libm 0.2.6", ] [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.15.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ - "hermit-abi", + "hermit-abi 0.2.6", "libc", ] @@ -4641,7 +4704,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" dependencies = [ "crc32fast", - "hashbrown 0.12.3", + "hashbrown", "indexmap", "memchr", ] @@ -4657,9 +4720,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" +checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" [[package]] name = "oorandom" @@ -4681,21 +4744,21 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl-probe" -version = "0.1.2" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "os_str_bytes" -version = "6.0.0" +version = "6.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" +checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" [[package]] name = "output_vt100" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9" +checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" dependencies = [ "winapi", ] @@ -4706,6 +4769,16 @@ version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "packed_simd_2" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" +dependencies = [ + "cfg-if", + "libm 0.1.4", +] + [[package]] name = "pallet-alliance" version = "4.0.0-dev" @@ -4720,7 +4793,7 @@ dependencies = [ "pallet-identity", "parity-scale-codec", "scale-info", - "sha2 0.10.2", + "sha2 0.10.6", "sp-core", "sp-io", "sp-runtime", @@ -5405,7 +5478,7 @@ dependencies = [ ] [[package]] -name = "pallet-nft-fractionalisation" +name = "pallet-nft-fractionalization" version = "4.0.0-dev" dependencies = [ "frame-benchmarking", @@ -5441,24 +5514,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-nfts" -version = "4.0.0-dev" -dependencies = [ - "enumflags2", - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "pallet-balances", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - [[package]] name = "pallet-nicks" version = "4.0.0-dev" @@ -6203,9 +6258,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.1.3" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04bc9583b5e01cc8c70d89acc9af14ef9b1c29ee3a0074b2a9eea8c0fa396690" +checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" dependencies = [ "arrayvec 0.7.2", "bitvec", @@ -6254,7 +6309,7 @@ checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core 0.8.5", + "parking_lot_core 0.8.6", ] [[package]] @@ -6264,14 +6319,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 
0.9.1", + "parking_lot_core 0.9.5", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ "cfg-if", "instant", @@ -6283,22 +6338,22 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.1" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954" +checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-sys 0.32.0", + "windows-sys 0.42.0", ] [[package]] name = "paste" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" +checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" [[package]] name = "pbkdf2" @@ -6315,7 +6370,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "digest 0.10.3", + "digest 0.10.6", ] [[package]] @@ -6326,15 +6381,15 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.4.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dbc7bc69c062e492337d74d59b120c274fd3d261b6bf6d3207d499b4b379c41a" +checksum = "0f6e86fb9e7026527a0d46bc308b841d73170ef8f443e1807f6ef88526a816d4" dependencies = [ "thiserror", "ucd-trie", @@ -6342,9 +6397,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.4.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b75706b9642ebcb34dab3bc7750f811609a0eb1dd8b88c2d15bf628c1c65b2" +checksum = "96504449aa860c8dcde14f9fba5c58dc6658688ca1fe363589d6327b8662c603" dependencies = [ "pest", "pest_generator", @@ -6352,9 +6407,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.4.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f9272122f5979a6511a749af9db9bfc810393f63119970d7085fed1c4ea0db" +checksum = "798e0220d1111ae63d66cb66a5dcb3fc2d986d520b98e49e1852bfdb11d7c5e7" dependencies = [ "pest", "pest_meta", @@ -6365,9 +6420,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.4.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8717927f9b79515e565a64fe46c38b8cd0427e64c40680b14a7365ab09ac8d" +checksum = "984298b75898e30a843e278a9f2452c31e349a073a0ce6fd950a12a74464e065" dependencies = [ "once_cell", "pest", @@ -6376,9 +6431,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f" +checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" dependencies = [ "fixedbitset", "indexmap", @@ -6434,9 +6489,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = 
"6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "platforms" @@ -6444,11 +6499,17 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" +[[package]] +name = "platforms" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" + [[package]] name = "plotters" -version = "0.3.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" +checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" dependencies = [ "num-traits", "plotters-backend", @@ -6459,31 +6520,31 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.0" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b07fffcddc1cb3a1de753caa4e4df03b79922ba43cf882acc1bdd7e8df9f4590" +checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" [[package]] name = "plotters-svg" -version = "0.3.0" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b38a02e23bd9604b842a812063aec4ef702b57989c37b655254bb61c471ad211" +checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" dependencies = [ "plotters-backend", ] [[package]] name = "polling" -version = "2.4.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4609a838d88b73d8238967b60dd115cc08d38e2bbaf51ee1e4b695f89122e2" +checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" dependencies = [ "autocfg", "cfg-if", "libc", "log", "wepoll-ffi", - "winapi", + "windows-sys 0.42.0", ] [[package]] @@ -6511,15 +6572,15 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.10" 
+version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "predicates" -version = "2.1.3" +version = "2.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6bd09a7f7e68f3f0bf710fb7ab9c4615a488b58b5f653382a687701e458c92" +checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", @@ -6531,37 +6592,47 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.2" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e35a3326b75e49aa85f5dc6ec15b41108cf5aee58eabb1f274dd18b73c2451" +checksum = "72f883590242d3c6fc5bf50299011695fa6590c2c70eac95ee1bdb9a733ad1a2" [[package]] name = "predicates-tree" -version = "1.0.2" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f553275e5721409451eb85e15fd9a860a6e5ab4496eb215987502b5f5391f2" +checksum = "54ff541861505aabf6ea722d2131ee980b8276e10a1297b94e896dd8b621850d" dependencies = [ "predicates-core", - "treeline", + "termtree", ] [[package]] name = "pretty_assertions" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89f989ac94207d048d92db058e4f6ec7342b0971fc58d1271ca148b799b3563" +checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755" dependencies = [ - "ansi_term", "ctor", "diff", "output_vt100", + "yansi", +] + +[[package]] +name = "prettyplease" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c8992a85d8e93a28bdf76137db888d3874e3b230dee5ed8bebac4c9f7617773" +dependencies = [ + "proc-macro2", + "syn", ] [[package]] name = "primitive-types" -version = "0.12.0" +version = "0.12.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cfd65aea0c5fa0bfcc7c9e7ca828c921ef778f43d325325ec84bda371bfa75a" +checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" dependencies = [ "fixed-hash", "impl-codec", @@ -6572,10 +6643,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.1.3" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" dependencies = [ + "once_cell", "thiserror", "toml", ] @@ -6606,35 +6678,35 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.46" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" +checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus" -version = "0.13.0" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f64969ffd5dd8f39bd57a68ac53c163a095ed9d0fb707146da1b27025a3504" +checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" dependencies = [ "cfg-if", "fnv", "lazy_static", "memchr", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "thiserror", ] [[package]] name = "prometheus-client" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c473049631c233933d6286c88bbb7be30e62ec534cf99a9ae0079211f7fa603" +checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" dependencies = [ "dtoa", - "itoa 1.0.4", + "itoa 1.0.5", "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] @@ -6652,9 +6724,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.0" +version = "0.11.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" +checksum = "c01db6702aa05baa3f57dec92b8eeeeb4cb19e894e73996b32a4093289e54592" dependencies = [ "bytes", "prost-derive", @@ -6662,9 +6734,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.1" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb" +checksum = "cb5320c680de74ba083512704acb90fe00f28f79207286a848e730c45dd73ed6" dependencies = [ "bytes", "heck", @@ -6673,9 +6745,11 @@ dependencies = [ "log", "multimap", "petgraph", + "prettyplease", "prost", "prost-types", "regex", + "syn", "tempfile", "which", ] @@ -6695,9 +6769,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.0" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7345d5f0e08c0536d7ac7229952590239e77abf0a0100a1b1d890add6ea96364" +checksum = "c8842bad1a5419bca14eac663ba798f6bc19c413c2fdceb5f3ba3b0932d96720" dependencies = [ "anyhow", "itertools", @@ -6708,9 +6782,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.1" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" +checksum = "017f79637768cde62820bc2d4fe0e45daaa027755c323ad077767c6c5f173091" dependencies = [ "bytes", "prost", @@ -6718,9 +6792,9 @@ dependencies = [ [[package]] name = "psm" -version = "0.1.12" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3abf49e5417290756acfd26501536358560c4a5cc4a0934d390939acb3e7083a" +checksum = "5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874" dependencies = [ "cc", ] @@ -6753,9 +6827,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.18" +version = "1.0.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" +checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" dependencies = [ "proc-macro2", ] @@ -6787,7 +6861,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", - "rand_core 0.6.2", + "rand_core 0.6.4", ] [[package]] @@ -6807,7 +6881,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.2", + "rand_core 0.6.4", ] [[package]] @@ -6821,11 +6895,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.2" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.8", ] [[package]] @@ -6853,7 +6927,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e" dependencies = [ - "rand_core 0.6.2", + "rand_core 0.6.4", ] [[package]] @@ -6864,62 +6938,60 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.5.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" +checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" dependencies = [ - "autocfg", - "crossbeam-deque", "either", "rayon-core", ] [[package]] name = "rayon-core" -version = "1.9.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" +checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "lazy_static", "num_cpus", ] [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] [[package]] name = "redox_users" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.8", "redox_syscall", + "thiserror", ] [[package]] name = "ref-cast" -version = "1.0.6" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" +checksum = "8c78fb8c9293bcd48ef6fce7b4ca950ceaf21210de6e105a883ee280c0f7b9ed" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.6" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" +checksum = "9f9c0c92af03644e4806106281fe2e068ac5bc0ae74a707266d06ea27bccee5f" dependencies = [ "proc-macro2", "quote", @@ -6940,9 +7012,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" dependencies = [ "aho-corasick", 
"memchr", @@ -6951,19 +7023,18 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "byteorder", "regex-syntax", ] [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] name = "remove_dir_all" @@ -6986,9 +7057,9 @@ dependencies = [ [[package]] name = "rfc6979" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88c86280f057430a52f4861551b092a01b419b8eacefc7c995eacb9dc132fe32" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ "crypto-bigint", "hmac 0.12.1", @@ -7022,11 +7093,12 @@ dependencies = [ [[package]] name = "rpassword" -version = "7.0.0" +version = "7.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b763cb66df1c928432cc35053f8bd4cec3335d8559fc16010017d16b3c1680" +checksum = "6678cf63ab3491898c0d021b493c94c9b221d91295294a2a5746eacbe5928322" dependencies = [ "libc", + "rtoolbox", "winapi", ] @@ -7041,15 +7113,25 @@ dependencies = [ "log", "netlink-packet-route", "netlink-proto", - "nix 0.24.2", + "nix 0.24.3", "thiserror", ] +[[package]] +name = "rtoolbox" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "034e22c514f5c0cb8a10ff341b9b048b5ceb21591f31c8f44c43b960f9b3524a" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "rustc-demangle" -version = "0.1.18" +version = "0.1.21" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" [[package]] name = "rustc-hash" @@ -7078,28 +7160,42 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.4", + "semver 1.0.16", ] [[package]] name = "rustix" -version = "0.35.9" +version = "0.35.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c825b8aa8010eb9ee99b75f05e10180b9278d161583034d7574c9d617aeada" +checksum = "727a1a6d65f786ec22df8a81ca3121107f235970dc1705ed681d3e6e8b9cd5f9" dependencies = [ "bitflags", "errno", - "io-lifetimes", + "io-lifetimes 0.7.5", "libc", - "linux-raw-sys", - "windows-sys 0.36.1", + "linux-raw-sys 0.0.46", + "windows-sys 0.42.0", +] + +[[package]] +name = "rustix" +version = "0.36.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4feacf7db682c6c329c4ede12649cd36ecab0f3be5b7d74e6a20304725db4549" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes 1.0.3", + "libc", + "linux-raw-sys 0.1.4", + "windows-sys 0.42.0", ] [[package]] name = "rustls" -version = "0.20.2" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" +checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" dependencies = [ "log", "ring", @@ -7109,9 +7205,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", 
"rustls-pemfile", @@ -7121,18 +7217,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "0.2.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" dependencies = [ "base64", ] [[package]] name = "rustversion" -version = "1.0.6" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" [[package]] name = "rusty-fork" @@ -7158,9 +7254,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" [[package]] name = "safe-mix" @@ -7290,7 +7386,7 @@ version = "0.10.0-dev" dependencies = [ "array-bytes", "chrono", - "clap 4.0.11", + "clap 4.0.32", "fdlimit", "futures", "futures-timer", @@ -7662,7 +7758,7 @@ dependencies = [ "tempfile", "tracing", "tracing-subscriber 0.2.25", - "wasmi 0.13.0", + "wasmi 0.13.2", "wat", ] @@ -7675,7 +7771,7 @@ dependencies = [ "sp-wasm-interface", "thiserror", "wasm-instrument 0.3.0", - "wasmi 0.13.0", + "wasmi 0.13.2", ] [[package]] @@ -7687,7 +7783,7 @@ dependencies = [ "sc-executor-common", "sp-runtime-interface", "sp-wasm-interface", - "wasmi 0.13.0", + "wasmi 0.13.2", ] [[package]] @@ -7700,7 +7796,7 @@ dependencies = [ "once_cell", "parity-scale-codec", "paste", - "rustix", + "rustix 0.35.13", "sc-allocator", "sc-executor-common", "sc-runtime-test", @@ -7857,7 +7953,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "tokio-util 0.7.4", + "tokio-util", "unsigned-varint", "zeroize", ] @@ -8479,9 
+8575,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "333af15b02563b8182cd863f925bd31ef8fa86a0e095d30c091956057d436153" +checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" dependencies = [ "bitvec", "cfg-if", @@ -8493,9 +8589,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53f56acbd0743d29ffa08f911ab5397def774ad01bab3786804cf6ee057fb5e1" +checksum = "303959cf613a6f6efd19ed4b4ad5bf79966a13352716299ad532cfb115f4205c" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -8505,12 +8601,12 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "winapi", + "windows-sys 0.36.1", ] [[package]] @@ -8521,7 +8617,7 @@ checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" dependencies = [ "arrayref", "arrayvec 0.5.2", - "curve25519-dalek 2.1.2", + "curve25519-dalek 2.1.3", "getrandom 0.1.16", "merlin", "rand 0.7.3", @@ -8539,9 +8635,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" +checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" [[package]] name = "sct" @@ -8561,7 +8657,7 @@ checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct", "der", - "generic-array 0.14.4", + 
"generic-array 0.14.6", "pkcs8", "subtle", "zeroize", @@ -8569,18 +8665,18 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.24.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7649a0b3ffb32636e60c7ce0d70511eda9c52c658cd0634e194d5a19943aeff" +checksum = "d9512ffd81e3a3503ed401f79c33168b9148c75038956039166cd750eaa037c3" dependencies = [ "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7058dc8eaf3f2810d7828680320acda0b25a288f6d288e19278e249bbf74226b" +checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" dependencies = [ "cc", ] @@ -8596,9 +8692,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.3.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b239a3d5db51252f6f48f42172c65317f37202f4a21021bf5f9d40a408f4592c" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ "bitflags", "core-foundation", @@ -8609,9 +8705,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.3.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4effb91b4b8b6fb7732e670b6cee160278ff8e6bf485c7805d9e319d76e284" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" dependencies = [ "core-foundation-sys", "libc", @@ -8637,9 +8733,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.4" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" dependencies = [ "serde", ] @@ -8652,18 +8748,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] 
name = "serde" -version = "1.0.145" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" +checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" dependencies = [ "serde_derive", ] [[package]] name = "serde_cbor" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e18acfa2f90e8b735b2836ab8d538de304cbb6729a7360729ea5a895d15a622" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" dependencies = [ "half", "serde", @@ -8671,9 +8767,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.145" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" +checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" dependencies = [ "proc-macro2", "quote", @@ -8682,37 +8778,37 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.85" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" +checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" dependencies = [ - "itoa 1.0.4", + "itoa 1.0.5", "ryu", "serde", ] [[package]] name = "sha-1" -version = "0.9.4" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfebf75d25bd900fd1e7d11501efab59bc846dbc76196839663e6637bba9f25f" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ "block-buffer 0.9.0", "cfg-if", - "cpuid-bool", + "cpufeatures", "digest 0.9.0", "opaque-debug 0.3.0", ] [[package]] name = "sha1" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"006769ba83e921b3085caa8334186b00cf92b4cb1a6cf4632fbccc8eff5c7549" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.6", ] [[package]] @@ -8729,9 +8825,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", "cfg-if", @@ -8742,22 +8838,22 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.2" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.6", ] [[package]] name = "sha3" -version = "0.10.0" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f935e31cf406e8c0e96c2815a5516181b7004ae8c5f296293221e9b1e356bd" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.3", + "digest 0.10.6", "keccak", ] @@ -8772,27 +8868,27 @@ dependencies = [ [[package]] name = "shlex" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a568c8f2cd051a4d283bd6eb0343ac214c1b0f1ac19f93e1175b2dee38c73d" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +checksum = 
"e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ "libc", ] [[package]] name = "signature" -version = "1.6.3" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb766570a2825fa972bceff0d195727876a9cdf2460ab2e52d455dc2de47fd9" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.3", - "rand_core 0.6.2", + "digest 0.10.6", + "rand_core 0.6.4", ] [[package]] @@ -8809,9 +8905,12 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.2" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg", +] [[package]] name = "slice-group-by" @@ -8827,9 +8926,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "snap" -version = "1.0.5" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45456094d1983e2ee2a18fdfebce3189fa451699d0502cb8e3b49dba5ba41451" +checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" @@ -8840,19 +8939,19 @@ dependencies = [ "aes-gcm", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-pre.1", - "rand_core 0.6.2", + "curve25519-dalek 4.0.0-pre.5", + "rand_core 0.6.4", "ring", "rustc_version 0.4.0", - "sha2 0.10.2", + "sha2 0.10.6", "subtle", ] [[package]] name = "socket2" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", @@ -9187,8 +9286,8 @@ version = "5.0.0" dependencies = [ "blake2", "byteorder", - "digest 0.10.3", 
- "sha2 0.10.2", + "digest 0.10.6", + "sha2 0.10.6", "sha3", "sp-std", "twox-hash", @@ -9359,7 +9458,7 @@ dependencies = [ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.0.11", + "clap 4.0.32", "honggfuzz", "parity-scale-codec", "rand 0.8.5", @@ -9635,7 +9734,7 @@ dependencies = [ "array-bytes", "criterion", "hash-db", - "hashbrown 0.12.3", + "hashbrown", "lazy_static", "lru", "memory-db", @@ -9689,7 +9788,7 @@ dependencies = [ "log", "parity-scale-codec", "sp-std", - "wasmi 0.13.0", + "wasmi 0.13.2", "wasmtime", ] @@ -9731,9 +9830,9 @@ dependencies = [ [[package]] name = "ss58-registry" -version = "1.34.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a9821878e1f13aba383aa40a86fb1b33c7265774ec91e32563cb1dd1577496" +checksum = "23d92659e7d18d82b803824a9ba5a6022cff101c3491d027c1c1d8d30e749284" dependencies = [ "Inflector", "num-format", @@ -9766,7 +9865,7 @@ dependencies = [ "cfg_aliases", "libc", "parking_lot 0.11.2", - "parking_lot_core 0.8.5", + "parking_lot_core 0.8.6", "static_init_macro", "winapi", ] @@ -9814,9 +9913,9 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4faebde00e8ff94316c01800f9054fd2ba77d30d9e922541913051d1d978918b" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck", "proc-macro2", @@ -9829,7 +9928,7 @@ dependencies = [ name = "subkey" version = "2.0.2" dependencies = [ - "clap 4.0.11", + "clap 4.0.32", "sc-cli", ] @@ -9842,7 +9941,7 @@ dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", "schnorrkel", - "sha2 0.9.8", + "sha2 0.9.9", "zeroize", ] @@ -9850,14 +9949,14 @@ dependencies = [ name = "substrate-build-script-utils" version = "3.0.0" dependencies = [ - "platforms", + "platforms 2.0.0", ] [[package]] name = "substrate-frame-cli" version = "4.0.0-dev" dependencies = [ - "clap 
4.0.11", + "clap 4.0.32", "frame-support", "frame-system", "sc-cli", @@ -10102,15 +10201,15 @@ dependencies = [ [[package]] name = "subtle" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.98" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" +checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" dependencies = [ "proc-macro2", "quote", @@ -10119,9 +10218,9 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", @@ -10158,9 +10257,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fa7e55043acb85fca6b3c01485a2eeb6b69c5d21002e273c79e465f43b7ac1" +checksum = "9410d0f6853b1d94f0e519fb95df60f29d2c1eff2d921ffdf01a4c8a3b54f12d" [[package]] name = "tempfile" @@ -10178,13 +10277,19 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" dependencies = [ "winapi-util", ] +[[package]] +name = "termtree" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "95059e91184749cb66be6dc994f67f182b6d897cb3df74a5bf66b5e709295fd8" + [[package]] name = "textwrap" version = "0.11.0" @@ -10196,18 +10301,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" dependencies = [ "proc-macro2", "quote", @@ -10240,9 +10345,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.5.1+5.3.0-patched" +version = "0.5.2+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "931e876f91fed0827f863a2d153897790da0b24d882c721a79cb3beb0b903261" +checksum = "ec45c14da997d0925c7835883e4d5c181f196fa142f8c19d7643d1e9af2592c3" dependencies = [ "cc", "fs_extra", @@ -10251,9 +10356,9 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", "wasi 0.10.0+wasi-snapshot-preview1", @@ -10272,7 +10377,7 @@ dependencies = [ "pbkdf2 0.11.0", "rand 0.8.5", "rustc-hash", - "sha2 0.10.2", + "sha2 0.10.6", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -10300,9 +10405,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.1" +version = "1.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -10315,9 +10420,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.22.0" +version = "1.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3" +checksum = "eab6d665857cc6ca78d6e80303a02cea7a7851e85dfbd77cbdc09bd129f1ef46" dependencies = [ "autocfg", "bytes", @@ -10330,14 +10435,14 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "winapi", + "windows-sys 0.42.0", ] [[package]] name = "tokio-macros" -version = "1.7.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", @@ -10346,9 +10451,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.2" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls", "tokio", @@ -10357,14 +10462,14 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.7" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" +checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" dependencies = [ "futures-core", "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.6.10", + "tokio-util", ] [[package]] @@ -10380,20 
+10485,6 @@ dependencies = [ "tokio-stream", ] -[[package]] -name = "tokio-util" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.2.9", - "tokio", -] - [[package]] name = "tokio-util" version = "0.7.4" @@ -10411,9 +10502,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" dependencies = [ "serde", ] @@ -10455,15 +10546,15 @@ checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.35" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", @@ -10474,9 +10565,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" +checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", @@ -10516,9 +10607,9 @@ dependencies = [ [[package]] name = "tracing-serde" -version = "0.1.2" +version = "0.1.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" dependencies = [ "serde", "tracing-core", @@ -10565,12 +10656,6 @@ dependencies = [ "tracing-log", ] -[[package]] -name = "treeline" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" - [[package]] name = "trie-bench" version = "0.33.0" @@ -10594,7 +10679,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "004e1e8f92535694b4cb1444dc5a8073ecf0815e3357f729638b9f8fc4062908" dependencies = [ "hash-db", - "hashbrown 0.12.3", + "hashbrown", "log", "rustc-hex", "smallvec", @@ -10632,7 +10717,7 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna", + "idna 0.2.3", "ipnet", "lazy_static", "rand 0.8.5", @@ -10674,7 +10759,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" name = "try-runtime-cli" version = "0.10.0-dev" dependencies = [ - "clap 4.0.11", + "clap 4.0.32", "frame-remote-externalities", "frame-try-runtime", "hex", @@ -10703,9 +10788,9 @@ dependencies = [ [[package]] name = "trybuild" -version = "1.0.60" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da18123d1316f5a65fc9b94e30a0fcf58afb1daff1b8e18f41dc30f5bfc38c8" +checksum = "ed01de3de062db82c0920b5cabe804f88d599a3f217932292597c678c903754d" dependencies = [ "dissimilar", "glob", @@ -10719,9 +10804,9 @@ dependencies = [ [[package]] name = "tt-call" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e66dcbec4290c69dd03c57e76c2469ea5c7ce109c6dd4351c13055cf71ea055" +checksum = "f4f195fd851901624eee5a58c4bb2b4f06399148fcd0ed336e6f1cb60a9881df" [[package]] name = "twox-hash" @@ -10730,28 
+10815,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", - "digest 0.10.3", + "digest 0.10.6", "rand 0.8.5", "static_assertions", ] [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "uint" -version = "0.9.0" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e11fe9a9348741cf134085ad57c249508345fe16411b3d7fb4ff2da2f1d6382e" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" dependencies = [ "byteorder", "crunchy", @@ -10761,18 +10846,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.4" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -dependencies = [ - "matches", -] +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.1" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" +checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" [[package]] name = "unicode-normalization" @@ -10785,15 +10867,15 @@ dependencies = [ [[package]] name = "unicode-width" -version = "0.1.8" +version = "0.1.10" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unicode-xid" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" @@ -10801,7 +10883,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", "subtle", ] @@ -10825,13 +10907,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.1" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", - "idna", - "matches", + "idna 0.3.0", "percent-encoding", ] @@ -10843,21 +10924,15 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "vcpkg" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" - -[[package]] -name = "vec-arena" -version = "1.0.0" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eafc1b9b2dfc6f5529177b62cf806484db55b32dc7c9658a118e11bbeb33061d" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "void" @@ -10921,9 +10996,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.77" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e68338db6becec24d3c7977b5bf8a48be992c934b5d07177e3931f5dc9b076c" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -10931,13 +11006,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.77" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f34c405b4f0658583dba0c1c7c9b694f3cac32655db463b56c254a1c75269523" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -10946,9 +11021,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.20" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" dependencies = [ "cfg-if", "js-sys", @@ -10958,9 +11033,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.77" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d5a6580be83b19dc570a8f9c324251687ab2184e57086f71625feb57ec77c8" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10968,9 +11043,9 @@ dependencies = [ [[package]] name = 
"wasm-bindgen-macro-support" -version = "0.2.77" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3775a030dc6f5a0afd8a84981a21cc92a781eb429acef9ecce476d0c9113e92" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", @@ -10981,9 +11056,18 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.77" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c279e376c7a8e8752a8f1eaa35b7b0bee6bb9fb0cdacfa97cc3f1f289c87e2b4" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" + +[[package]] +name = "wasm-encoder" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05632e0a66a6ed8cca593c24223aabd6262f256c3693ad9822c315285f010614" +dependencies = [ + "leb128", +] [[package]] name = "wasm-instrument" @@ -11061,13 +11145,13 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.13.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc13b3c219ca9aafeec59150d80d89851df02e0061bc357b4d66fc55a8d38787" +checksum = "06c326c93fbf86419608361a2c925a31754cf109da1b8b55737070b4d6669422" dependencies = [ "parity-wasm", "wasmi-validation", - "wasmi_core 0.2.0", + "wasmi_core 0.2.1", ] [[package]] @@ -11099,12 +11183,12 @@ checksum = "a1ea379cbb0b41f3a9f0bf7b47036d036aae7f43383d8cc487d4deccf40dee0a" [[package]] name = "wasmi_core" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a088e8c4c59c6f2b9eae169bf86328adccc477c00b56d3661e3e9fb397b184" +checksum = "57d20cb3c59b788653d99541c646c561c9dd26506f25c0cebfe810659c54c6d7" dependencies = [ "downcast-rs", - "libm", + "libm 0.2.6", "memory_units", "num-rational", "num-traits", @@ -11117,7 +11201,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c5bf998ab792be85e20e771fe14182b4295571ad1d4f89d3da521c1bef5f597a" dependencies = [ "downcast-rs", - "libm", + "libm 0.2.6", "num-traits", ] @@ -11141,9 +11225,9 @@ dependencies = [ [[package]] name = "wasmtime" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a10dc9784d8c3a33c970e3939180424955f08af2e7f20368ec02685a0e8f065" +checksum = "4ad5af6ba38311282f2a21670d96e78266e8c8e2f38cbcd52c254df6ccbc7731" dependencies = [ "anyhow", "bincode", @@ -11169,18 +11253,18 @@ dependencies = [ [[package]] name = "wasmtime-asm-macros" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4dbdc6daf68528cad1275ac91e3f51848ce9824385facc94c759f529decdf8" +checksum = "45de63ddfc8b9223d1adc8f7b2ee5f35d1f6d112833934ad7ea66e4f4339e597" dependencies = [ "cfg-if", ] [[package]] name = "wasmtime-cache" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f507f3fa1ee1b2f9a83644e2514242b1dfe580782c0eb042f1ef70255bc4ffe" +checksum = "bcd849399d17d2270141cfe47fa0d91ee52d5f8ea9b98cf7ddde0d53e5f79882" dependencies = [ "anyhow", "base64", @@ -11188,9 +11272,9 @@ dependencies = [ "directories-next", "file-per-thread-logger", "log", - "rustix", + "rustix 0.35.13", "serde", - "sha2 0.9.8", + "sha2 0.9.9", "toml", "windows-sys 0.36.1", "zstd", @@ -11198,9 +11282,9 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f03cf79d982fc68e94ba0bea6a300a3b94621c4eb9705eece0a4f06b235a3b5" +checksum = "4bd91339b742ff20bfed4532a27b73c86b5bcbfedd6bea2dcdf2d64471e1b5c6" dependencies = [ "anyhow", "cranelift-codegen", @@ -11208,7 +11292,7 @@ dependencies = [ "cranelift-frontend", "cranelift-native", "cranelift-wasm", - "gimli 0.26.1", + "gimli 0.26.2", "log", "object 0.29.0", "target-lexicon", @@ 
-11219,13 +11303,13 @@ dependencies = [ [[package]] name = "wasmtime-environ" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c587c62e91c5499df62012b87b88890d0eb470b2ffecc5964e9da967b70c77c" +checksum = "ebb881c61f4f627b5d45c54e629724974f8a8890d455bcbe634330cc27309644" dependencies = [ "anyhow", "cranelift-entity", - "gimli 0.26.1", + "gimli 0.26.2", "indexmap", "log", "object 0.29.0", @@ -11238,20 +11322,20 @@ dependencies = [ [[package]] name = "wasmtime-jit" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "047839b5dabeae5424a078c19b8cc897e5943a7fadc69e3d888b9c9a897666b3" +checksum = "1985c628011fe26adf5e23a5301bdc79b245e0e338f14bb58b39e4e25e4d8681" dependencies = [ "addr2line 0.17.0", "anyhow", "bincode", "cfg-if", "cpp_demangle", - "gimli 0.26.1", + "gimli 0.26.2", "log", "object 0.29.0", "rustc-demangle", - "rustix", + "rustix 0.35.13", "serde", "target-lexicon", "thiserror", @@ -11263,20 +11347,20 @@ dependencies = [ [[package]] name = "wasmtime-jit-debug" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b299569abf6f99b7b8e020afaf84a700e8636c6a42e242069267322cd5818235" +checksum = "f671b588486f5ccec8c5a3dba6b4c07eac2e66ab8c60e6f4e53717c77f709731" dependencies = [ "object 0.29.0", "once_cell", - "rustix", + "rustix 0.35.13", ] [[package]] name = "wasmtime-runtime" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae79e0515160bd5abee5df50a16c4eb8db9f71b530fc988ae1d9ce34dcb8dd01" +checksum = "ee8f92ad4b61736339c29361da85769ebc200f184361959d1792832e592a1afd" dependencies = [ "anyhow", "cc", @@ -11286,10 +11370,10 @@ dependencies = [ "log", "mach", "memfd", - "memoffset", + "memoffset 0.6.5", "paste", "rand 0.8.5", - "rustix", + "rustix 0.35.13", "thiserror", "wasmtime-asm-macros", 
"wasmtime-environ", @@ -11299,9 +11383,9 @@ dependencies = [ [[package]] name = "wasmtime-types" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "790cf43ee8e2d5dad1780af30f00d7a972b74725fb1e4f90c28d62733819b185" +checksum = "d23d61cb4c46e837b431196dd06abb11731541021916d03476a178b54dc07aeb" dependencies = [ "cranelift-entity", "serde", @@ -11311,27 +11395,30 @@ dependencies = [ [[package]] name = "wast" -version = "38.0.0" +version = "50.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ebc29df4629f497e0893aacd40f13a4a56b85ef6eb4ab6d603f07244f1a7bf2" +checksum = "a2cbb59d4ac799842791fe7e806fa5dbbf6b5554d538e51cc8e176db6ff0ae34" dependencies = [ "leb128", + "memchr", + "unicode-width", + "wasm-encoder", ] [[package]] name = "wat" -version = "1.0.40" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adcfaeb27e2578d2c6271a45609f4a055e6d7ba3a12eff35b1fd5ba147bdf046" +checksum = "584aaf7a1ecf4d383bbe1a25eeab0cbb8ff96acc6796707ff65cde48f4632f15" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.54" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a84d70d1ec7d2da2d26a5bd78f4bca1b8c3254805363ce743b7a05bc30d195a" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", @@ -11349,9 +11436,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.2" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ "webpki", ] @@ -11367,12 +11454,13 @@ dependencies = [ [[package]] name = "which" -version = "4.0.2" +version = "4.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "87c14ef7e1b8b8ecfc75d5eca37949410046e66f15d185c01d70824f1f8111ef" +checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" dependencies = [ + "either", "libc", - "thiserror", + "once_cell", ] [[package]] @@ -11425,19 +11513,6 @@ dependencies = [ "windows_x86_64_msvc 0.34.0", ] -[[package]] -name = "windows-sys" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3df6e476185f92a12c072be4a189a0210dcdcf512a1891d6dff9edb874deadc6" -dependencies = [ - "windows_aarch64_msvc 0.32.0", - "windows_i686_gnu 0.32.0", - "windows_i686_msvc 0.32.0", - "windows_x86_64_gnu 0.32.0", - "windows_x86_64_msvc 0.32.0", -] - [[package]] name = "windows-sys" version = "0.36.1" @@ -11452,10 +11527,25 @@ dependencies = [ ] [[package]] -name = "windows_aarch64_msvc" -version = "0.32.0" +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.0", + "windows_i686_gnu 0.42.0", + "windows_i686_msvc 0.42.0", + "windows_x86_64_gnu 0.42.0", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" +checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" [[package]] name = "windows_aarch64_msvc" @@ -11470,10 +11560,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] -name = "windows_i686_gnu" -version = "0.32.0" +name = "windows_aarch64_msvc" +version = "0.42.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" +checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" [[package]] name = "windows_i686_gnu" @@ -11488,10 +11578,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] -name = "windows_i686_msvc" -version = "0.32.0" +name = "windows_i686_gnu" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" +checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" [[package]] name = "windows_i686_msvc" @@ -11506,10 +11596,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] -name = "windows_x86_64_gnu" -version = "0.32.0" +name = "windows_i686_msvc" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" +checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" [[package]] name = "windows_x86_64_gnu" @@ -11524,10 +11614,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] -name = "windows_x86_64_msvc" -version = "0.32.0" +name = "windows_x86_64_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" +checksum = 
"09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" [[package]] name = "windows_x86_64_msvc" @@ -11541,20 +11637,26 @@ version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" + [[package]] name = "winreg" -version = "0.7.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" dependencies = [ "winapi", ] [[package]] name = "wyz" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" dependencies = [ "tap", ] @@ -11565,7 +11667,7 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" dependencies = [ - "curve25519-dalek 3.0.2", + "curve25519-dalek 3.2.0", "rand_core 0.5.1", "zeroize", ] @@ -11584,6 +11686,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "zeroize" version = "1.5.7" @@ -11595,9 +11703,9 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" 
+checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" dependencies = [ "proc-macro2", "quote", @@ -11626,9 +11734,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.1+zstd.1.5.2" +version = "2.0.4+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" +checksum = "4fa202f2ef00074143e219d15b62ffc317d17cc33909feac471c044087cad7b0" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index 923cb83aceb91..d81f23199ae7b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -123,7 +123,7 @@ members = [ "frame/proxy", "frame/message-queue", "frame/nfts", - "frame/nft-fractionalisation", + "frame/nft-fractionalization", "frame/nomination-pools", "frame/nomination-pools/fuzzer", "frame/nomination-pools/benchmarking", diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index d78f28ededb15..cee07693e99a9 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -79,7 +79,7 @@ pallet-message-queue = { version = "7.0.0-dev", default-features = false, path = pallet-mmr = { version = "4.0.0-dev", default-features = false, path = "../../../frame/merkle-mountain-range" } pallet-multisig = { version = "4.0.0-dev", default-features = false, path = "../../../frame/multisig" } pallet-nfts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/nfts" } -pallet-nft-fractionalisation = { version = "4.0.0-dev", default-features = false, path = "../../../frame/nft-fractionalisation" } +pallet-nft-fractionalization = { version = "4.0.0-dev", default-features = false, path = "../../../frame/nft-fractionalization" } pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../../../frame/nomination-pools"} pallet-nomination-pools-benchmarking = { version = "1.0.0", default-features = false, optional = true, path = "../../../frame/nomination-pools/benchmarking" } 
pallet-nomination-pools-runtime-api = { version = "1.0.0-dev", default-features = false, path = "../../../frame/nomination-pools/runtime-api" } @@ -200,7 +200,7 @@ std = [ "pallet-recovery/std", "pallet-uniques/std", "pallet-nfts/std", - "pallet-nft-fractionalisation/std", + "pallet-nft-fractionalization/std", "pallet-vesting/std", "log/std", "frame-try-runtime?/std", @@ -258,6 +258,7 @@ runtime-benchmarks = [ "pallet-utility/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-nfts/runtime-benchmarks", + "pallet-nft-fractionalization/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-whitelist/runtime-benchmarks", "frame-system-benchmarking/runtime-benchmarks", @@ -318,7 +319,7 @@ try-runtime = [ "pallet-transaction-storage/try-runtime", "pallet-uniques/try-runtime", "pallet-nfts/try-runtime", - "pallet-nft-fractionalisation/try-runtime", + "pallet-nft-fractionalization/try-runtime", "pallet-vesting/try-runtime", "pallet-whitelist/try-runtime", ] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 3f9ce2f76c4b3..c4f4c1c4163ef 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -32,10 +32,14 @@ use frame_support::{ pallet_prelude::Get, parameter_types, traits::{ - fungible::ItemOf, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, - Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, - KeyOwnerProofSystem, LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, - WithdrawReasons, + fungible::ItemOf, + tokens::{ + nonfungibles_v2::{Inspect, LockableNonfungible, Mutate}, + AttributeNamespace, + }, + AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Currency, EitherOfDiverse, + EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, + LockIdentifier, Locker, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, }, weights::{ constants::{ @@ -56,7 +60,7 @@ use pallet_grandpa::{ fg_primitives, 
AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, }; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; -use pallet_nfts::PalletFeatures; +use pallet_nfts::{ItemConfig, PalletFeatures}; use pallet_nis::WithMaximumOf; use pallet_session::historical::{self as pallet_session_historical}; pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdjustment}; @@ -74,7 +78,8 @@ use sp_runtime::{ SaturatedConversion, StaticLookup, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedPointNumber, FixedU128, Perbill, Percent, Permill, Perquintill, + ApplyExtrinsicResult, DispatchResult, FixedPointNumber, FixedU128, Perbill, Percent, Permill, + Perquintill, }; use sp_std::prelude::*; #[cfg(any(feature = "std", test))] @@ -93,8 +98,6 @@ pub use pallet_sudo::Call as SudoCall; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; -pub use pallet_nft_fractionalisation; - /// Implementations of some helper traits passed into runtime modules as associated types. pub mod impls; #[cfg(not(feature = "runtime-benchmarks"))] @@ -1562,6 +1565,17 @@ parameter_types! { pub Features: PalletFeatures = PalletFeatures::all_enabled(); } +const LOCKED_NFT_KEY: &[u8; 6] = b"locked"; +type ItemId = ::ItemId; +type CollectionId = ::CollectionId; + +pub struct NftLocker; +impl Locker for NftLocker { + fn is_locked(collection: CollectionId, item: ItemId) -> bool { + Nfts::attribute(&collection, &item, &AttributeNamespace::Pallet, LOCKED_NFT_KEY).is_some() + } +} + impl pallet_nfts::Config for Runtime { type RuntimeEvent = RuntimeEvent; type CollectionId = u32; @@ -1585,25 +1599,40 @@ impl pallet_nfts::Config for Runtime { #[cfg(feature = "runtime-benchmarks")] type Helper = (); type CreateOrigin = AsEnsureOriginWithArg>; - type Locker = (); + type Locker = NftLocker; } parameter_types! 
{ pub const NftFractionsPalletId: PalletId = PalletId(*b"fraction"); - pub const BuybackThreshold: u32 = 1; } -impl pallet_nft_fractionalisation::Config for Runtime { +pub struct RuntimeLockableNonfungible; +impl LockableNonfungible for RuntimeLockableNonfungible { + fn lock(collection: &CollectionId, item: &ItemId) -> DispatchResult { + >::set_attribute( + collection, + item, + LOCKED_NFT_KEY, + &[1], + ) + } + fn unlock(collection: &CollectionId, item: &ItemId) -> DispatchResult { + >::clear_attribute(collection, item, LOCKED_NFT_KEY) + } +} + +impl pallet_nft_fractionalization::Config for Runtime { type RuntimeEvent = RuntimeEvent; type PalletId = NftFractionsPalletId; type Currency = Balances; - type BuybackThreshold = BuybackThreshold; - type CollectionId = ::CollectionId; - type ItemId = ::ItemId; + type NftCollectionId = ::CollectionId; + type NftId = ::ItemId; type AssetBalance = ::Balance; - type Assets = Assets; - type Items = Nfts; type AssetId = ::AssetId; + type Assets = Assets; + type Nfts = Nfts; + type NftLocker = RuntimeLockableNonfungible; + type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; } impl pallet_transaction_storage::Config for Runtime { @@ -1761,7 +1790,7 @@ construct_runtime!( Nis: pallet_nis, Uniques: pallet_uniques, Nfts: pallet_nfts, - NftFractions: pallet_nft_fractionalisation, + NftFractions: pallet_nft_fractionalization, TransactionStorage: pallet_transaction_storage, VoterList: pallet_bags_list::, StateTrieMigration: pallet_state_trie_migration, @@ -1894,6 +1923,7 @@ mod benches { [pallet_treasury, Treasury] [pallet_uniques, Uniques] [pallet_nfts, Nfts] + [pallet_nft_fractionalization, NftFractions] [pallet_utility, Utility] [pallet_vesting, Vesting] [pallet_whitelist, Whitelist] diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs index 2cdf2d7c4c8f6..cf4caa4fea68f 100644 --- a/frame/assets/src/functions.rs +++ b/frame/assets/src/functions.rs @@ -899,9 +899,7 @@ impl, I: 'static> 
Pallet { ensure!(metadata.as_ref().map_or(true, |m| !m.is_frozen), Error::::NoPermission); let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); - let new_deposit = T::MetadataDepositPerByte::get() - .saturating_mul(((name.len() + symbol.len()) as u32).into()) - .saturating_add(T::MetadataDepositBase::get()); + let new_deposit = Self::calc_metadata_deposit(&name, &symbol); if new_deposit > old_deposit { T::Currency::reserve(from, new_deposit - old_deposit)?; @@ -927,4 +925,14 @@ impl, I: 'static> Pallet { Ok(()) }) } + + /// Calculate the metadata deposit for the provided data. + pub(super) fn calc_metadata_deposit( + name: &Vec, + symbol: &Vec, + ) -> DepositBalanceOf { + T::MetadataDepositPerByte::get() + .saturating_mul(((name.len() + symbol.len()) as u32).into()) + .saturating_add(T::MetadataDepositBase::get()) + } } diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs index b10b8c6b10755..d35c60113d778 100644 --- a/frame/assets/src/impl_fungibles.rs +++ b/frame/assets/src/impl_fungibles.rs @@ -231,6 +231,19 @@ impl, I: 'static> fungibles::metadata::Mutate<:: } } +impl, I: 'static> + fungibles::metadata::CalcMetadataDeposit< + ::AccountId>>::Balance, + > for Pallet +{ + fn calc( + name: &Vec, + symbol: &Vec, + ) -> ::AccountId>>::Balance { + Self::calc_metadata_deposit(name, symbol) + } +} + impl, I: 'static> fungibles::approvals::Inspect<::AccountId> for Pallet { diff --git a/frame/nft-fractionalisation/README.md b/frame/nft-fractionalisation/README.md deleted file mode 100644 index 5331a7a737563..0000000000000 --- a/frame/nft-fractionalisation/README.md +++ /dev/null @@ -1,4 +0,0 @@ -### Lock NFT - -Lock an NFT from `pallet-uniques`, automatically mint an fungible -asset from `pallet-assets`. 
\ No newline at end of file diff --git a/frame/nft-fractionalisation/src/lib.rs b/frame/nft-fractionalisation/src/lib.rs deleted file mode 100644 index dd0aecb1fccf6..0000000000000 --- a/frame/nft-fractionalisation/src/lib.rs +++ /dev/null @@ -1,298 +0,0 @@ -#![cfg_attr(not(feature = "std"), no_std)] - -pub use pallet::*; - -#[cfg(test)] -mod mock; - -#[cfg(test)] -mod tests; - -// #[cfg(feature = "runtime-benchmarks")] -// mod benchmarking; - -pub use scale_info::Type; - -pub type ItemId = ::ItemId; -pub type CollectionId = ::CollectionId; - -#[frame_support::pallet] -pub mod pallet { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - use frame_support::{ - dispatch::DispatchResult, - sp_runtime::traits::{ - AccountIdConversion, AtLeast32BitUnsigned, IntegerSquareRoot, SaturatedConversion, - Saturating, StaticLookup, Zero, - }, - traits::{ - fungibles::{ - metadata::Mutate as MutateMetadata, Create, Inspect, InspectEnumerable, Mutate, - Transfer, - }, - tokens::nonfungibles_v2::{ - Inspect as NonFungiblesInspect, Transfer as NonFungiblesTransfer, - }, - Currency, - }, - PalletId, - }; - - pub use pallet_nfts::Incrementable; - - pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; - pub type AssetIdOf = - <::Assets as Inspect<::AccountId>>::AssetId; - pub type AssetBalanceOf = - <::Assets as Inspect<::AccountId>>::Balance; - - pub type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config { - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - - type Currency: Currency; - - /// Identifier for the collection of item. - type CollectionId: Member + Parameter + MaxEncodedLen + Copy + Incrementable; - - /// The type used to identify a unique item within a collection. 
- type ItemId: Member + Parameter + MaxEncodedLen + Copy; - - type AssetBalance: AtLeast32BitUnsigned - + codec::FullCodec - + Copy - + MaybeSerializeDeserialize - + sp_std::fmt::Debug - + Default - + From - + IntegerSquareRoot - + Zero - + TypeInfo - + MaxEncodedLen; - - type AssetId: Member - + Parameter - + Default - + Copy - + codec::HasCompact - + From - + MaybeSerializeDeserialize - + MaxEncodedLen - + PartialOrd - + TypeInfo; - - type Assets: Inspect - + Create - + InspectEnumerable - + Mutate - + MutateMetadata - + Transfer; - - type Items: NonFungiblesInspect< - Self::AccountId, - ItemId = Self::ItemId, - CollectionId = Self::CollectionId, - > + NonFungiblesTransfer; - - #[pallet::constant] - type PalletId: Get; - - #[pallet::constant] - type BuybackThreshold: Get; - } - - #[pallet::storage] - #[pallet::getter(fn assets_minted)] - // TODO: query amount minted from pallet assets instead of storing it locally. - // Add a public getter function to pallet assets. - pub type AssetsMinted = - StorageMap<_, Twox64Concat, AssetIdOf, AssetBalanceOf, OptionQuery>; - - #[pallet::storage] - #[pallet::getter(fn asset_to_nft)] - // TODO: store information about Asset ID and the corresponding Collection and Item ID. - pub type AssetToNft = - StorageMap<_, Twox64Concat, AssetIdOf, (T::CollectionId, T::ItemId), OptionQuery>; - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - NFTLocked(T::CollectionId, T::ItemId), - NFTUnlocked(T::CollectionId, T::ItemId), - } - - #[pallet::error] - pub enum Error { - AssetDataNotFound, - NFTDataNotFound, - } - - #[pallet::call] - impl Pallet { - // TODO: correct weights - #[pallet::weight(10_000 + T::DbWeight::get().writes(2).ref_time())] - /// Pallet's account must be funded before lock is possible! 
- /// 5EYCAe5gjC5dxKPbV2GPQUetETjFNSYZsSwSurVTTXidSLbh - pub fn lock_nft_create_asset( - origin: OriginFor, - collection_id: T::CollectionId, - item_id: T::ItemId, - asset_id: AssetIdOf, - beneficiary: T::AccountId, - min_balance: AssetBalanceOf, - amount: AssetBalanceOf, - ) -> DispatchResult { - let _who = ensure_signed(origin)?; - let admin_account_id = Self::pallet_account_id(); - - Self::do_lock_nft(collection_id, item_id)?; - Self::do_create_asset(asset_id, admin_account_id, min_balance)?; - Self::do_mint_asset(asset_id, &beneficiary, amount)?; - - // Mutate this storage item to retain information about the amount minted. - >::try_mutate( - asset_id, - |assets_minted| -> Result<(), DispatchError> { - match assets_minted.is_some() { - true => - *assets_minted = Some(assets_minted.unwrap().saturating_add(amount)), - false => *assets_minted = Some(amount), - } - - Ok(()) - }, - )?; - - // Mutate this storage item to retain information about the asset created. - >::try_mutate(asset_id, |nft_id| -> Result<(), DispatchError> { - *nft_id = Some((collection_id, item_id)); - - Ok(()) - })?; - - Self::deposit_event(Event::NFTLocked(collection_id, item_id)); - - Ok(()) - } - - /// Return and burn a % of the asset, unlock the NFT. Currently 100% is the minimum - /// threshold. - // TODO: correct weights - #[pallet::weight(10_000 + T::DbWeight::get().writes(2).ref_time())] - pub fn burn_asset_unlock_nft( - origin: OriginFor, - asset_id: AssetIdOf, - amount: AssetBalanceOf, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - - ensure!(>::contains_key(asset_id), Error::::NFTDataNotFound); - let (collection_id, item_id) = Self::get_nft_id(asset_id); - Self::do_burn_asset(asset_id, &who, amount)?; - - // Mutate this storage item to retain information about the amount burned. 
- >::try_mutate( - asset_id, - |assets_minted| -> Result<(), DispatchError> { - *assets_minted = Some(assets_minted.unwrap().saturating_sub(amount)); - Ok(()) - }, - )?; - - Self::do_unlock_nft(collection_id, item_id, asset_id, who)?; - - Self::deposit_event(Event::NFTUnlocked(collection_id, item_id)); - - Ok(()) - } - } - - impl Pallet { - fn pallet_account_id() -> T::AccountId { - T::PalletId::get().into_account_truncating() - } - - /// Transfer the NFT from the account locking the NFT to the pallet's account. - fn do_lock_nft(collection_id: T::CollectionId, item_id: T::ItemId) -> DispatchResult { - let admin_account_id = Self::pallet_account_id(); - T::Items::transfer(&collection_id, &item_id, &admin_account_id) - } - - /// Transfer the NFT to the account returning the tokens. Remove the key and value from - /// storage. - fn do_unlock_nft( - collection_id: T::CollectionId, - item_id: T::ItemId, - asset_id: AssetIdOf, - account: T::AccountId, - ) -> DispatchResult { - match T::Items::transfer(&collection_id, &item_id, &account) { - Ok(()) => { - >::take(asset_id); - return Ok(()) - }, - Err(e) => return Err(e), - } - } - - /// Assert that the `asset_id` was created by means of locking an NFT and fetch - /// its `CollectionId` and `ItemId`. - fn get_nft_id(asset_id: AssetIdOf) -> (T::CollectionId, T::ItemId) { - // Check for explicit existence of the value in the extrinsic. - >::get(asset_id).unwrap() - } - - /// Create the new asset. - fn do_create_asset( - asset_id: AssetIdOf, - admin: T::AccountId, - min_balance: AssetBalanceOf, - ) -> DispatchResult { - T::Assets::create(asset_id, admin, true, min_balance) - } - - /// Mint the `amount` of tokens with `asset_id` into the beneficiary's account. 
- fn do_mint_asset( - asset_id: AssetIdOf, - beneficiary: &T::AccountId, - amount: AssetBalanceOf, - ) -> DispatchResult { - T::Assets::mint_into(asset_id, beneficiary, amount) - } - - /// If the amount of tokens is enough for the buyback, burn the tokens from the - /// account that is returning the tokens. - fn do_burn_asset( - asset_id: AssetIdOf, - account: &T::AccountId, - amount: AssetBalanceOf, - ) -> Result, DispatchError> { - // Assert that the asset exists in storage. - ensure!(>::contains_key(asset_id), Error::::NFTDataNotFound); - Self::check_token_amount(asset_id, amount); - T::Assets::burn_from(asset_id, account, amount) - } - - /// Assert that the amount of tokens returned is equal to the amount needed to buy - /// back the locked NFT. - fn check_token_amount(asset_id: AssetIdOf, amount: AssetBalanceOf) -> () { - // TODO: create a threshold of tokens to return in order to get back the NFT. - // Otherwise one person can hold one token in order not to let others buy back. - let buyback_threshold: AssetBalanceOf = - T::BuybackThreshold::get().saturated_into::>(); - assert_eq!( - Some(amount), - Some(>::get(asset_id).unwrap().saturating_mul(buyback_threshold)) - ); - } - } -} diff --git a/frame/nft-fractionalisation/src/mock.rs b/frame/nft-fractionalisation/src/mock.rs deleted file mode 100644 index 199113b3aee8e..0000000000000 --- a/frame/nft-fractionalisation/src/mock.rs +++ /dev/null @@ -1,150 +0,0 @@ -use crate as pallet_nft_fractionalisation; -use frame_support::{ - parameter_types, - traits::{AsEnsureOriginWithArg, ConstU128, ConstU16, ConstU32, ConstU64}, - PalletId, -}; -use frame_system as system; -use frame_system::{EnsureRoot, EnsureSigned}; -use sp_core::H256; -use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, -}; - -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; -type Balance = u128; -type AccountId = u64; - -pub const EXISTENTIAL_DEPOSIT: 
u128 = 500; -pub const MILLICENTS: Balance = 1_000_000_000; -pub const CENTS: Balance = 1_000 * MILLICENTS; -pub const DOLLARS: Balance = 100 * CENTS; - -// Configure a mock runtime to test the pallet. -frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system, - NftFractions: pallet_nft_fractionalisation, - Assets: pallet_assets, - Uniques: pallet_uniques, - Balances: pallet_balances, - } -); - -impl system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_balances::Config for Test { - type MaxLocks = ConstU32<50>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU128; - type AccountStore = System; - type WeightInfo = pallet_balances::weights::SubstrateWeight; -} - -parameter_types! 
{ - pub const AssetDeposit: Balance = 100 * DOLLARS; - pub const ApprovalDeposit: Balance = 1 * DOLLARS; - pub const StringLimit: u32 = 50; - pub const MetadataDepositBase: Balance = 10 * DOLLARS; - pub const MetadataDepositPerByte: Balance = 1 * DOLLARS; -} - -impl pallet_assets::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Balance = u128; - type AssetId = u32; - type Currency = Balances; - type ForceOrigin = EnsureRoot; - type AssetDeposit = AssetDeposit; - type AssetAccountDeposit = ConstU128; - type MetadataDepositBase = MetadataDepositBase; - type MetadataDepositPerByte = MetadataDepositPerByte; - type ApprovalDeposit = ApprovalDeposit; - type StringLimit = StringLimit; - type Freezer = (); - type Extra = (); - type WeightInfo = pallet_assets::weights::SubstrateWeight; -} - -parameter_types! { - pub const CollectionDeposit: Balance = 100 * DOLLARS; - pub const ItemDeposit: Balance = 1 * DOLLARS; - pub const KeyLimit: u32 = 32; - pub const ValueLimit: u32 = 256; -} - -impl pallet_uniques::Config for Test { - type RuntimeEvent = RuntimeEvent; - type CollectionId = u32; - type ItemId = u32; - type Currency = Balances; - type ForceOrigin = frame_system::EnsureRoot; - type CollectionDeposit = CollectionDeposit; - type ItemDeposit = ItemDeposit; - type MetadataDepositBase = MetadataDepositBase; - type AttributeDepositBase = MetadataDepositBase; - type DepositPerByte = MetadataDepositPerByte; - type StringLimit = StringLimit; - type KeyLimit = KeyLimit; - type ValueLimit = ValueLimit; - type WeightInfo = pallet_uniques::weights::SubstrateWeight; - #[cfg(feature = "runtime-benchmarks")] - type Helper = (); - type CreateOrigin = AsEnsureOriginWithArg>; - type Locker = (); -} - -parameter_types! 
{ - pub const NftFractionsPalletId: PalletId = PalletId(*b"fraction"); -} - -impl pallet_nft_fractionalisation::Config for Test { - type RuntimeEvent = RuntimeEvent; - type PalletId = NftFractionsPalletIdPalletId; - type Currency = Balances; - type CollectionId = Uniques; - type ItemId = Uniques; - type AssetId = Assets; -} - -// Build genesis storage according to the mock runtime. -pub fn new_test_ext() -> sp_io::TestExternalities { - system::GenesisConfig::default().build_storage::().unwrap().into() -} diff --git a/frame/nft-fractionalisation/src/tests.rs b/frame/nft-fractionalisation/src/tests.rs deleted file mode 100644 index f0126c9f07ff3..0000000000000 --- a/frame/nft-fractionalisation/src/tests.rs +++ /dev/null @@ -1,16 +0,0 @@ -use crate::{mock::*, Error}; -use frame_support::{assert_noop, assert_ok, traits::Currency}; - -#[test] -fn address_is_set() { - new_test_ext().execute_with(|| { - // Dispatch a signed extrinsic. - assert_eq!(NftFractions::pallet_address(), None); - assert_ok!(NftFractions::set_pallet_address(RuntimeOrigin::signed(1))); - assert_eq!(NftFractions::pallet_address(), Some(1u64)); - // assert_eq!( - // NftFractions::issuance(), - // Some(>::total_issuance()) - // ) - }); -} diff --git a/frame/nft-fractionalisation/Cargo.toml b/frame/nft-fractionalization/Cargo.toml similarity index 97% rename from frame/nft-fractionalisation/Cargo.toml rename to frame/nft-fractionalization/Cargo.toml index 1d5ab04c5cb88..96c84c3cd8104 100644 --- a/frame/nft-fractionalisation/Cargo.toml +++ b/frame/nft-fractionalization/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "pallet-nft-fractionalisation" +name = "pallet-nft-fractionalization" version = "4.0.0-dev" description = "FRAME pallet for semi-fungible tokens." 
authors = ["Parity Technologies "] diff --git a/frame/nft-fractionalization/README.md b/frame/nft-fractionalization/README.md new file mode 100644 index 0000000000000..ab073777915fe --- /dev/null +++ b/frame/nft-fractionalization/README.md @@ -0,0 +1,3 @@ +### Lock NFT + +Lock an NFT from `pallet-nfts` and mint fungible assets from `pallet-assets`. diff --git a/frame/nft-fractionalization/src/benchmarking.rs b/frame/nft-fractionalization/src/benchmarking.rs new file mode 100644 index 0000000000000..7984d817148af --- /dev/null +++ b/frame/nft-fractionalization/src/benchmarking.rs @@ -0,0 +1,129 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Nft fractionalization pallet benchmarking. 
+ +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use frame_benchmarking::{benchmarks, whitelisted_caller}; +use frame_support::{ + assert_ok, + traits::{ + fungible::{Inspect as InspectFungible, Unbalanced}, + tokens::nonfungibles_v2::{Create, Mutate}, + }, +}; +use frame_system::RawOrigin as SystemOrigin; +use pallet_nfts::{CollectionConfig, CollectionSettings, ItemConfig, MintSettings}; +use sp_runtime::traits::{Bounded, StaticLookup}; +use sp_std::prelude::*; + +use crate::Pallet as NftFractions; + +type BalanceOf = + <::Currency as InspectFungible<::AccountId>>::Balance; + +type CollectionConfigOf = CollectionConfig< + BalanceOf, + ::BlockNumber, + ::NftCollectionId, +>; + +fn default_collection_config() -> CollectionConfigOf +where + T::Currency: InspectFungible, +{ + CollectionConfig { + settings: CollectionSettings::all_enabled(), + max_supply: None, + mint_settings: MintSettings::default(), + } +} + +fn mint_nft(nft_id: T::NftId) -> (T::AccountId, AccountIdLookupOf) +where + T::NftCollectionId: From, + T::Currency: Unbalanced, + T::Nfts: Create, T::BlockNumber, T::NftCollectionId>> + + Mutate, +{ + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + assert_ok!(T::Currency::set_balance(&caller, BalanceOf::::max_value())); + assert_ok!(T::Nfts::create_collection(&caller, &caller, &default_collection_config::())); + assert_ok!(T::Nfts::mint_into(&0.into(), &nft_id, &caller, &ItemConfig::default(), true)); + (caller, caller_lookup) +} + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +benchmarks! 
{ + where_clause { + where + T::NftCollectionId: From, + T::NftId: From, + T::Currency: Unbalanced, + T::Nfts: Create, T::BlockNumber, T::NftCollectionId>> + + Mutate, + } + + fractionalize { + let (caller, caller_lookup) = mint_nft::(0.into()); + }: _(SystemOrigin::Signed(caller.clone()), 0.into(), 0.into(), 0.into(), caller_lookup, 1000u32.into()) + verify { + assert_last_event::( + Event::NftFractionalized { + nft_collection: 0.into(), + nft: 0.into(), + fractions: 1000u32.into(), + asset: 0.into(), + beneficiary: caller, + }.into() + ); + } + + unify { + let (caller, caller_lookup) = mint_nft::(0.into()); + NftFractions::::fractionalize( + SystemOrigin::Signed(caller.clone()).into(), + 0.into(), + 0.into(), + 0.into(), + caller_lookup.clone(), + 1000u32.into(), + )?; + }: _(SystemOrigin::Signed(caller.clone()), 0.into(), 0.into(), 0.into(), caller_lookup) + verify { + assert_last_event::( + Event::NftUnified { + nft_collection: 0.into(), + nft: 0.into(), + asset: 0.into(), + beneficiary: caller, + }.into() + ); + } + + impl_benchmark_test_suite!(NftFractions, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs new file mode 100644 index 0000000000000..f2e4180eb064d --- /dev/null +++ b/frame/nft-fractionalization/src/lib.rs @@ -0,0 +1,359 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! * NFT Fractionalization pallet +//! +//! This pallet provides the basic functionality that should allow users +//! to leverage partial ownership, transfers, and sales, of illiquid assets, +//! whether real-world assets represented by their digital twins, or NFTs, +//! or original NFTs. +//! +//! The functionality allows a user to lock an NFT they own, create a new +//! fungible asset, and mint a set amount of tokens (`fractions`). +//! +//! It also allows the user to burn 100% of the asset and to unlock the NFT +//! into their account. +//! +//! ### Functions +//! +//! * `fractionalize`: lock the NFT, create and mint new asset. +//! * `unify`: return 100% of the asset, unlock the NFT. + + +// Ensure we're `no_std` when compiling for Wasm. +#![cfg_attr(not(feature = "std"), no_std)] + +mod types; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(test)] +pub mod mock; +#[cfg(test)] +mod tests; + +pub mod weights; + +use frame_system::Config as SystemConfig; +pub use pallet::*; +pub use scale_info::Type; +pub use types::*; +pub use weights::WeightInfo; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use sp_runtime::traits::{One, Zero}; + use std::fmt::Display; + + use frame_support::{ + dispatch::DispatchResult, + sp_runtime::traits::{AccountIdConversion, AtLeast32BitUnsigned, StaticLookup}, + traits::{ + fungibles::{ + metadata::{CalcMetadataDeposit, Mutate as MutateMetadata}, + Create, Destroy, Inspect, Mutate, + }, + tokens::nonfungibles_v2::{ + Inspect as NonFungiblesInspect, LockableNonfungible, Transfer, + }, + Currency, ExistenceRequirement, + }, + PalletId, + }; + + pub type AssetIdOf = + <::Assets as Inspect<::AccountId>>::AssetId; + pub type AssetBalanceOf = + <::Assets as Inspect<::AccountId>>::Balance; + pub type AccountIdLookupOf = 
<::Lookup as StaticLookup>::Source; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The currency mechanism, used for paying for deposits. + type Currency: Currency; + + /// Identifier for the collection of NFT. + type NftCollectionId: Member + Parameter + MaxEncodedLen + Copy + Display; + + /// The type used to identify an NFT within a collection. + type NftId: Member + Parameter + MaxEncodedLen + Copy + Display; + + /// The type used to describe the amount of fractions converted into assets. + type AssetBalance: AtLeast32BitUnsigned + + codec::FullCodec + + Copy + + MaybeSerializeDeserialize + + sp_std::fmt::Debug + + From + + TypeInfo + + MaxEncodedLen; + + /// The type used to identify the assets created during fractionalization. + type AssetId: Member + + Parameter + + Copy + + From + + MaybeSerializeDeserialize + + MaxEncodedLen + + PartialOrd + + TypeInfo; + + /// Registry for the minted assets. + type Assets: Inspect + + Create + + Destroy + + Mutate + + MutateMetadata + + CalcMetadataDeposit<>::Balance>; + + /// Registry for minted NFTs. + type Nfts: NonFungiblesInspect< + Self::AccountId, + ItemId = Self::NftId, + CollectionId = Self::NftCollectionId, + > + Transfer; + + /// Locker trait to enable NFT's locking. + type NftLocker: LockableNonfungible; + + /// The pallet's id, used for deriving its sovereign account ID. + #[pallet::constant] + type PalletId: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + /// Keeps track of the corresponding NFT ID, asset ID and amount minted. 
+ #[pallet::storage] + #[pallet::getter(fn nft_to_asset)] + pub type NftToAsset = StorageMap< + _, + Blake2_128Concat, + (T::NftCollectionId, T::NftId), + Details, AssetBalanceOf>, + OptionQuery, + >; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// An NFT was successfully fractionalized. + NftFractionalized { + nft_collection: T::NftCollectionId, + nft: T::NftId, + fractions: AssetBalanceOf, + asset: AssetIdOf, + beneficiary: T::AccountId, + }, + /// An NFT was successfully returned back. + NftUnified { + nft_collection: T::NftCollectionId, + nft: T::NftId, + asset: AssetIdOf, + beneficiary: T::AccountId, + }, + } + + #[pallet::error] + pub enum Error { + /// Information about the fractionalized NFT can't be found. + DataNotFound, + /// The signing account has no permission to do the operation. + NoPermission, + /// NFT doesn't exist. + NftNotFound, + } + + #[pallet::call] + impl Pallet { + /// Lock the NFT and mint a new fungible asset. + /// + /// The dispatch origin for this call must be Signed. + /// The origin must be the owner of the NFT they are trying to lock. + /// + /// - `nft_collection_id`: The ID used to identify the collection of the NFT. + /// Is used within the context of `pallet_nfts`. + /// - `nft_id`: The ID used to identify the NFT within the given collection. + /// Is used within the context of `pallet_nfts`. + /// - `asset_id`: The ID of the new asset. It must not exist. + /// Is used within the context of `pallet_assets`. + /// - `beneficiary`: The account that will receive the newly created asset. + /// - `fractions`: The amount to be minted of the newly created asset. 
+ #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::fractionalize())] + pub fn fractionalize( + origin: OriginFor, + nft_collection_id: T::NftCollectionId, + nft_id: T::NftId, + asset_id: AssetIdOf, + beneficiary: AccountIdLookupOf, + fractions: AssetBalanceOf, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; + + let nft_owner = + T::Nfts::owner(&nft_collection_id, &nft_id).ok_or(Error::::NftNotFound)?; + ensure!(nft_owner == who, Error::::NoPermission); + + let pallet_account = Self::get_pallet_account(); + Self::do_lock_nft(nft_collection_id, nft_id)?; + Self::do_create_asset(asset_id, pallet_account.clone())?; + Self::do_mint_asset(asset_id, &beneficiary, fractions)?; + Self::do_set_metadata(asset_id, &who, &pallet_account, &nft_collection_id, &nft_id)?; + + NftToAsset::::insert( + (nft_collection_id, nft_id), + Details { asset: asset_id, fractions }, + ); + + Self::deposit_event(Event::NftFractionalized { + nft_collection: nft_collection_id, + nft: nft_id, + fractions, + asset: asset_id, + beneficiary, + }); + + Ok(()) + } + + /// Burn the whole amount of the asset and return back the locked NFT. + /// + /// The dispatch origin for this call must be Signed. + /// + /// - `nft_collection_id`: The ID used to identify the collection of the NFT. + /// Is used within the context of `pallet_nfts`. + /// - `nft_id`: The ID used to identify the NFT within the given collection. + /// Is used within the context of `pallet_nfts`. + /// - `asset_id`: The ID of the asset being returned and destroyed. Must match + /// the original ID of the created asset, corresponding to the NFT. + /// Is used within the context of `pallet_assets`. + /// - `beneficiary`: The account that will receive the unified NFT. 
+ #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::unify())] + pub fn unify( + origin: OriginFor, + nft_collection_id: T::NftCollectionId, + nft_id: T::NftId, + asset_id: AssetIdOf, + beneficiary: AccountIdLookupOf, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; + + NftToAsset::::try_mutate_exists((nft_collection_id, nft_id), |maybe_details| { + let details = maybe_details.take().ok_or(Error::::DataNotFound)?; + ensure!(details.asset == asset_id, Error::::DataNotFound); + + Self::do_burn_asset(asset_id, &who, details.fractions)?; + Self::do_unlock_nft(nft_collection_id, nft_id, &beneficiary)?; + + Self::deposit_event(Event::NftUnified { + nft_collection: nft_collection_id, + nft: nft_id, + asset: asset_id, + beneficiary, + }); + + Ok(()) + }) + } + } + + impl Pallet { + /// The account ID of the pallet. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache + /// the value and only call this once. + fn get_pallet_account() -> T::AccountId { + T::PalletId::get().into_account_truncating() + } + + /// Transfer the NFT from the account holding that NFT to the pallet's account. + fn do_lock_nft(nft_collection_id: T::NftCollectionId, nft_id: T::NftId) -> DispatchResult { + T::NftLocker::lock(&nft_collection_id, &nft_id) + } + + /// Transfer the NFT to the account returning the tokens. + fn do_unlock_nft( + nft_collection_id: T::NftCollectionId, + nft_id: T::NftId, + account: &T::AccountId, + ) -> DispatchResult { + T::NftLocker::unlock(&nft_collection_id, &nft_id)?; + T::Nfts::transfer(&nft_collection_id, &nft_id, account) + } + + /// Create the new asset. + fn do_create_asset(asset_id: AssetIdOf, admin: T::AccountId) -> DispatchResult { + T::Assets::create(asset_id, admin, false, One::one()) + } + + /// Mint the `amount` of tokens with `asset_id` into the beneficiary's account. 
+ fn do_mint_asset( + asset_id: AssetIdOf, + beneficiary: &T::AccountId, + amount: AssetBalanceOf, + ) -> DispatchResult { + T::Assets::mint_into(asset_id, beneficiary, amount) + } + + /// Burn tokens from the account. + fn do_burn_asset( + asset_id: AssetIdOf, + account: &T::AccountId, + amount: AssetBalanceOf, + ) -> DispatchResult { + T::Assets::burn_from(asset_id, account, amount)?; + T::Assets::start_destroy(asset_id, None) + } + + /// Set the metadata for the newly created asset. + fn do_set_metadata( + asset_id: AssetIdOf, + depositor: &T::AccountId, + pallet_account: &T::AccountId, + nft_collection_id: &T::NftCollectionId, + nft_id: &T::NftId, + ) -> DispatchResult { + let symbol = "FRAC"; + let name = format!("Frac {nft_collection_id}-{nft_id}"); + let deposit = T::Assets::calc(&name.clone().into(), &symbol.into()); + if deposit != Zero::zero() { + T::Currency::transfer( + &depositor, + &pallet_account, + deposit, + ExistenceRequirement::KeepAlive, + )?; + } + T::Assets::set(asset_id, &pallet_account, name.into(), symbol.into(), 0) + } + } +} diff --git a/frame/nft-fractionalization/src/mock.rs b/frame/nft-fractionalization/src/mock.rs new file mode 100644 index 0000000000000..d3c05137d8869 --- /dev/null +++ b/frame/nft-fractionalization/src/mock.rs @@ -0,0 +1,206 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//! Test environment for Nft fractionalization pallet. + +use super::*; +use crate as pallet_nft_fractionalization; + +use frame_support::{ + construct_runtime, parameter_types, + traits::{ + tokens::{ + nonfungibles_v2::{Inspect, LockableNonfungible, Mutate}, + AttributeNamespace, + }, + AsEnsureOriginWithArg, ConstU32, ConstU64, Locker, + }, + PalletId, +}; +use frame_system::EnsureSigned; +use pallet_nfts::{ItemConfig, PalletFeatures}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + DispatchResult, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; +type AccountId = ::AccountId; +type ItemId = ::ItemId; +type CollectionId = ::CollectionId; + +// Configure a mock runtime to test the pallet. +construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + NftFractions: pallet_nft_fractionalization, + Assets: pallet_assets, + Balances: pallet_balances, + Nfts: pallet_nfts, + } +); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type Balance = u64; + type DustRemoval = (); + type RuntimeEvent = 
RuntimeEvent; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type MaxLocks = (); + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; +} + +impl pallet_assets::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = u64; + type RemoveItemsLimit = ConstU32<1000>; + type AssetId = u32; + type AssetIdParameter = u32; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type AssetDeposit = ConstU64<1>; + type AssetAccountDeposit = ConstU64<10>; + type MetadataDepositBase = ConstU64<1>; + type MetadataDepositPerByte = ConstU64<1>; + type ApprovalDeposit = ConstU64<1>; + type StringLimit = ConstU32<50>; + type Freezer = (); + type Extra = (); + type CallbackHandle = (); + type WeightInfo = (); + pallet_assets::runtime_benchmarks_enabled! { + type BenchmarkHelper = (); + } +} + +parameter_types! { + pub storage Features: PalletFeatures = PalletFeatures::all_enabled(); +} + +const LOCKED_NFT_KEY: &[u8; 6] = b"locked"; + +pub struct TestLocker; +impl Locker for TestLocker { + fn is_locked(collection: CollectionId, item: ItemId) -> bool { + Nfts::attribute(&collection, &item, &AttributeNamespace::Pallet, LOCKED_NFT_KEY).is_some() + } +} + +impl pallet_nfts::Config for Test { + type RuntimeEvent = RuntimeEvent; + type CollectionId = u32; + type ItemId = u32; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type Locker = TestLocker; + type CollectionDeposit = ConstU64<2>; + type ItemDeposit = ConstU64<1>; + type MetadataDepositBase = ConstU64<1>; + type AttributeDepositBase = ConstU64<1>; + type DepositPerByte = ConstU64<1>; + type StringLimit = ConstU32<50>; + type KeyLimit = ConstU32<50>; + type ValueLimit = ConstU32<50>; + type ApprovalsLimit = ConstU32<10>; + type ItemAttributesApprovalsLimit = ConstU32<2>; + type MaxTips = ConstU32<10>; + type 
MaxDeadlineDuration = ConstU64<10000>; + type Features = Features; + type WeightInfo = (); + pallet_nfts::runtime_benchmarks_enabled! { + type Helper = (); + } +} + +parameter_types! { + pub const NftFractionsPalletId: PalletId = PalletId(*b"fraction"); +} + +pub struct MockLockableNonfungible; +impl LockableNonfungible for MockLockableNonfungible { + fn lock(collection: &CollectionId, item: &ItemId) -> DispatchResult { + >::set_attribute( + collection, + item, + LOCKED_NFT_KEY, + &[1], + ) + } + fn unlock(collection: &CollectionId, item: &ItemId) -> DispatchResult { + >::clear_attribute(collection, item, LOCKED_NFT_KEY) + } +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type NftCollectionId = ::CollectionId; + type NftId = ::ItemId; + type AssetBalance = ::Balance; + type AssetId = ::AssetId; + type Assets = Assets; + type Nfts = Nfts; + type PalletId = NftFractionsPalletId; + type NftLocker = MockLockableNonfungible; + type WeightInfo = (); +} + +// Build genesis storage according to the mock runtime. +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} diff --git a/frame/nft-fractionalization/src/tests.rs b/frame/nft-fractionalization/src/tests.rs new file mode 100644 index 0000000000000..431ff62106885 --- /dev/null +++ b/frame/nft-fractionalization/src/tests.rs @@ -0,0 +1,235 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Nft fractionalization pallet. + +use crate::{mock::*, *}; +use frame_support::{ + assert_noop, assert_ok, + traits::{ + fungibles::{metadata::Inspect, InspectEnumerable}, + Currency, + }, +}; +use pallet_nfts::CollectionConfig; +use sp_runtime::{DispatchError, ModuleError}; + +fn assets() -> Vec { + let mut s: Vec<_> = <::Assets>::asset_ids().collect(); + s.sort(); + s +} + +fn events() -> Vec> { + let result = System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let mock::RuntimeEvent::NftFractions(inner) = e { + Some(inner) + } else { + None + } + }) + .collect(); + + System::reset_events(); + + result +} + +#[test] +fn fractionalize_should_work() { + new_test_ext().execute_with(|| { + let nft_collection_id = 0; + let nft_id = 0; + let asset_id = 0; + let fractions = 1000; + + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, CollectionConfig::default())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), nft_collection_id, nft_id, 1, None)); + + assert_ok!(NftFractions::fractionalize( + RuntimeOrigin::signed(1), + nft_collection_id, + nft_id, + asset_id, + 2, + fractions, + )); + assert_eq!(assets(), vec![asset_id]); + assert_eq!(Assets::balance(asset_id, 2), fractions); + assert_eq!(String::from_utf8(Assets::name(0)).unwrap(), "Frac 0-0"); + assert_eq!(String::from_utf8(Assets::symbol(0)).unwrap(), "FRAC"); + assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(1)); + assert_noop!( + 
Nfts::transfer(RuntimeOrigin::signed(1), nft_collection_id, nft_id, 2), + DispatchError::Module(ModuleError { + index: 4, + error: [12, 0, 0, 0], + message: Some("ItemLocked") + }) + ); + + let details = NftToAsset::::get((&nft_collection_id, &nft_id)).unwrap(); + assert_eq!(details.asset, asset_id); + assert_eq!(details.fractions, fractions); + + assert!(events().contains(&Event::::NftFractionalized { + nft_collection: nft_collection_id, + nft: nft_id, + fractions, + asset: asset_id, + beneficiary: 2, + })); + + let nft_id = nft_id + 1; + assert_noop!( + NftFractions::fractionalize( + RuntimeOrigin::signed(1), + nft_collection_id, + nft_id, + asset_id, + 2, + fractions, + ), + Error::::NftNotFound + ); + + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), nft_collection_id, nft_id, 2, None)); + assert_noop!( + NftFractions::fractionalize( + RuntimeOrigin::signed(1), + nft_collection_id, + nft_id, + asset_id, + 2, + fractions, + ), + Error::::NoPermission + ); + }); +} + +#[test] +fn unify_should_work() { + new_test_ext().execute_with(|| { + let nft_collection_id = 0; + let nft_id = 0; + let asset_id = 0; + let fractions = 1000; + + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, CollectionConfig::default())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), nft_collection_id, nft_id, 1, None)); + assert_ok!(NftFractions::fractionalize( + RuntimeOrigin::signed(1), + nft_collection_id, + nft_id, + asset_id, + 2, + fractions, + )); + + assert_noop!( + NftFractions::unify( + RuntimeOrigin::signed(2), + nft_collection_id + 1, + nft_id, + asset_id, + 1, + ), + Error::::DataNotFound + ); + assert_noop!( + NftFractions::unify( + RuntimeOrigin::signed(2), + nft_collection_id, + nft_id, + asset_id + 1, + 1, + ), + Error::::DataNotFound + ); + + // can't unify the asset a user doesn't hold + assert_noop!( + NftFractions::unify(RuntimeOrigin::signed(1), nft_collection_id, nft_id, 
asset_id, 1), + DispatchError::Module(ModuleError { + index: 2, + error: [1, 0, 0, 0], + message: Some("NoAccount") + }) + ); + + assert_ok!(NftFractions::unify( + RuntimeOrigin::signed(2), + nft_collection_id, + nft_id, + asset_id, + 1, + )); + + assert_eq!(Assets::balance(asset_id, 2), 0); + assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(1)); + assert!(!NftToAsset::::contains_key((&nft_collection_id, &nft_id))); + + assert!(events().contains(&Event::::NftUnified { + nft_collection: nft_collection_id, + nft: nft_id, + asset: asset_id, + beneficiary: 1, + })); + + // validate we need to hold the full balance to un-fractionalize the NFT + let asset_id = asset_id + 1; + assert_ok!(NftFractions::fractionalize( + RuntimeOrigin::signed(1), + nft_collection_id, + nft_id, + asset_id, + 1, + fractions, + )); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), asset_id, 2, 1)); + assert_eq!(Assets::balance(asset_id, 1), fractions - 1); + assert_eq!(Assets::balance(asset_id, 2), 1); + assert_noop!( + NftFractions::unify(RuntimeOrigin::signed(1), nft_collection_id, nft_id, asset_id, 1), + DispatchError::Module(ModuleError { + index: 2, + error: [0, 0, 0, 0], + message: Some("BalanceLow") + }) + ); + + assert_ok!(Assets::transfer(RuntimeOrigin::signed(2), asset_id, 1, 1)); + assert_ok!(NftFractions::unify( + RuntimeOrigin::signed(1), + nft_collection_id, + nft_id, + asset_id, + 2, + )); + assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(2)); + }); +} diff --git a/frame/nft-fractionalization/src/types.rs b/frame/nft-fractionalization/src/types.rs new file mode 100644 index 0000000000000..080c2e37d23fb --- /dev/null +++ b/frame/nft-fractionalization/src/types.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Various basic types for use in the Nft fractionalization pallet. + +use codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; + +/// Stores the details of a fractionalized item. +#[derive(Decode, Encode, Default, PartialEq, Eq, MaxEncodedLen, TypeInfo)] +pub struct Details { + /// Minted asset. + pub asset: AssetId, + + /// Number of fractions minted. + pub fractions: Fractions, +} diff --git a/frame/nft-fractionalization/src/weights.rs b/frame/nft-fractionalization/src/weights.rs new file mode 100644 index 0000000000000..59d086dcd95dd --- /dev/null +++ b/frame/nft-fractionalization/src/weights.rs @@ -0,0 +1,86 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_nft_fractionalization +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! 
DATE: 2022-12-22, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// /home/benchbot/cargo_target_dir/production/substrate +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json +// --pallet=pallet_nft_fractionalization +// --chain=dev +// --header=./HEADER-APACHE2 +// --output=./frame/nft-fractionalization/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_nft_fractionalization. +pub trait WeightInfo { + fn fractionalize() -> Weight; + fn unify() -> Weight; +} + +/// Weights for pallet_nft_fractionalization using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn fractionalize() -> Weight { + // Minimum execution time: 44_312 nanoseconds. + Weight::from_ref_time(44_871_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(5)) + } + fn unify() -> Weight { + // Minimum execution time: 31_654 nanoseconds. + Weight::from_ref_time(32_078_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(5)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn fractionalize() -> Weight { + // Minimum execution time: 44_312 nanoseconds. 
+ Weight::from_ref_time(44_871_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(5)) + } + fn unify() -> Weight { + // Minimum execution time: 31_654 nanoseconds. + Weight::from_ref_time(32_078_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(5)) + } +} diff --git a/frame/support/src/traits/tokens/fungibles/metadata.rs b/frame/support/src/traits/tokens/fungibles/metadata.rs index b736ab1489f58..85958948d839f 100644 --- a/frame/support/src/traits/tokens/fungibles/metadata.rs +++ b/frame/support/src/traits/tokens/fungibles/metadata.rs @@ -39,3 +39,8 @@ pub trait Mutate: Inspect { decimals: u8, ) -> DispatchResult; } + +pub trait CalcMetadataDeposit { + // Returns the required deposit amount for a given metadata. + fn calc(name: &Vec, symbol: &Vec) -> DepositBalance; +} diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs index 17c5d887defc0..79ff4f5f1e3af 100644 --- a/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -294,3 +294,11 @@ pub trait Transfer: Inspect { destination: &AccountId, ) -> DispatchResult; } + +/// Trait for locking and unlocking non-fungible sets of items. +pub trait LockableNonfungible { + /// Lock `item` of `collection`. + fn lock(collection: &CollectionId, item: &ItemId) -> DispatchResult; + /// Unlock `item` of `collection`. 
+ fn unlock(collection: &CollectionId, item: &ItemId) -> DispatchResult; +} From c4ab35c4711bc8a6c0568babb5b903905716946b Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Fri, 27 Jan 2023 14:09:26 +0200 Subject: [PATCH 074/101] fmt --- frame/nft-fractionalization/src/lib.rs | 35 +++++++++++++------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index f2e4180eb064d..85413f9e54514 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -16,24 +16,23 @@ // limitations under the License. //! * NFT Fractionalization pallet -//! -//! This pallet provides the basic functionality that should allow users -//! to leverage partial ownership, transfers, and sales, of illiquid assets, +//! +//! This pallet provides the basic functionality that should allow users +//! to leverage partial ownership, transfers, and sales, of illiquid assets, //! whether real-world assets represented by their digital twins, or NFTs, -//! or original NFTs. -//! -//! The functionality allows a user to lock an NFT they own, create a new -//! fungible asset, and mint a set amount of tokens (`fractions`). +//! or original NFTs. +//! +//! The functionality allows a user to lock an NFT they own, create a new +//! fungible asset, and mint a set amount of tokens (`fractions`). //! //! It also allows the user to burn 100% of the asset and to unlock the NFT -//! into their account. -//! -//! ### Functions +//! into their account. +//! +//! ### Functions //! //! * `fractionalize`: lock the NFT, create and mint new asset. //! * `unify`: return 100% of the asset, unlock the NFT. - // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -194,16 +193,16 @@ pub mod pallet { /// Lock the NFT and mint a new fungible asset. /// /// The dispatch origin for this call must be Signed. 
- /// The origin must be the owner of the NFT they are trying to lock. + /// The origin must be the owner of the NFT they are trying to lock. /// /// - `nft_collection_id`: The ID used to identify the collection of the NFT. /// Is used within the context of `pallet_nfts`. - /// - `nft_id`: The ID used to identify the NFT within the given collection. + /// - `nft_id`: The ID used to identify the NFT within the given collection. /// Is used within the context of `pallet_nfts`. /// - `asset_id`: The ID of the new asset. It must not exist. /// Is used within the context of `pallet_assets`. - /// - `beneficiary`: The account that will receive the newly created asset. - /// - `fractions`: The amount to be minted of the newly created asset. + /// - `beneficiary`: The account that will receive the newly created asset. + /// - `fractions`: The amount to be minted of the newly created asset. #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::fractionalize())] pub fn fractionalize( @@ -244,17 +243,17 @@ pub mod pallet { } /// Burn the whole amount of the asset and return back the locked NFT. - /// + /// /// The dispatch origin for this call must be Signed. /// /// - `nft_collection_id`: The ID used to identify the collection of the NFT. /// Is used within the context of `pallet_nfts`. - /// - `nft_id`: The ID used to identify the NFT within the given collection. + /// - `nft_id`: The ID used to identify the NFT within the given collection. /// Is used within the context of `pallet_nfts`. /// - `asset_id`: The ID of the asset being returned and destroyed. Must match /// the original ID of the created asset, corresponding to the NFT. /// Is used within the context of `pallet_assets`. - /// - `beneficiary`: The account that will receive the unified NFT. + /// - `beneficiary`: The account that will receive the unified NFT. 
#[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::unify())] pub fn unify( From 8e00a6bf10dbe18438bc88d98776fad2a6bd09af Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Wed, 8 Feb 2023 12:16:51 +0100 Subject: [PATCH 075/101] Add fee reserved before creating an asset --- bin/node/runtime/src/lib.rs | 1 + frame/nft-fractionalization/src/lib.rs | 25 +++++++++++++++++++++--- frame/nft-fractionalization/src/types.rs | 5 ++++- 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 5733b06478c31..a733145f3070a 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1629,6 +1629,7 @@ impl LockableNonfungible for RuntimeLockableNonfungible { impl pallet_nft_fractionalization::Config for Runtime { type RuntimeEvent = RuntimeEvent; type PalletId = NftFractionsPalletId; + type Fee = AssetDeposit; type Currency = Balances; type NftCollectionId = ::CollectionId; type NftId = ::ItemId; diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index 85413f9e54514..0582623d900cb 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -59,7 +59,8 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use sp_runtime::traits::{One, Zero}; - use std::fmt::Display; + use scale_info::prelude::format; + use sp_std::fmt::Display; use frame_support::{ dispatch::DispatchResult, @@ -81,6 +82,8 @@ pub mod pallet { <::Assets as Inspect<::AccountId>>::AssetId; pub type AssetBalanceOf = <::Assets as Inspect<::AccountId>>::Balance; + pub type FeeDepositOf = + <::Currency as Currency<::AccountId>>::Balance; pub type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; #[pallet::pallet] @@ -95,6 +98,9 @@ pub mod pallet { /// The currency mechanism, used for paying for deposits. 
type Currency: Currency; + #[pallet::constant] + type Fee: Get>; + /// Identifier for the collection of NFT. type NftCollectionId: Member + Parameter + MaxEncodedLen + Copy + Display; @@ -154,7 +160,7 @@ pub mod pallet { _, Blake2_128Concat, (T::NftCollectionId, T::NftId), - Details, AssetBalanceOf>, + Details, AssetBalanceOf, FeeDepositOf>, OptionQuery, >; @@ -221,6 +227,8 @@ pub mod pallet { ensure!(nft_owner == who, Error::::NoPermission); let pallet_account = Self::get_pallet_account(); + let fee = T::Fee::get(); + Self::do_reserve_fee(&nft_owner, pallet_account.clone(), fee)?; Self::do_lock_nft(nft_collection_id, nft_id)?; Self::do_create_asset(asset_id, pallet_account.clone())?; Self::do_mint_asset(asset_id, &beneficiary, fractions)?; @@ -228,7 +236,7 @@ pub mod pallet { NftToAsset::::insert( (nft_collection_id, nft_id), - Details { asset: asset_id, fractions }, + Details { asset: asset_id, fractions, fee }, ); Self::deposit_event(Event::NftFractionalized { @@ -270,8 +278,11 @@ pub mod pallet { let details = maybe_details.take().ok_or(Error::::DataNotFound)?; ensure!(details.asset == asset_id, Error::::DataNotFound); + let pallet_account = Self::get_pallet_account(); + let fee = details.fee; Self::do_burn_asset(asset_id, &who, details.fractions)?; Self::do_unlock_nft(nft_collection_id, nft_id, &beneficiary)?; + Self::do_unreserve_fee(&who, pallet_account, fee)?; Self::deposit_event(Event::NftUnified { nft_collection: nft_collection_id, @@ -294,6 +305,14 @@ pub mod pallet { T::PalletId::get().into_account_truncating() } + fn do_reserve_fee(account: &T::AccountId, pallet_account: T::AccountId, fee: FeeDepositOf) -> DispatchResult { + T::Currency::transfer(account, &pallet_account, fee, ExistenceRequirement::KeepAlive) + } + + fn do_unreserve_fee(account: &T::AccountId, pallet_account: T::AccountId, fee: FeeDepositOf) -> DispatchResult { + T::Currency::transfer(&pallet_account, account, fee, ExistenceRequirement::KeepAlive) + } + /// Transfer the NFT from the 
account holding that NFT to the pallet's account. fn do_lock_nft(nft_collection_id: T::NftCollectionId, nft_id: T::NftId) -> DispatchResult { T::NftLocker::lock(&nft_collection_id, &nft_id) diff --git a/frame/nft-fractionalization/src/types.rs b/frame/nft-fractionalization/src/types.rs index 080c2e37d23fb..8cf5187c0b4cc 100644 --- a/frame/nft-fractionalization/src/types.rs +++ b/frame/nft-fractionalization/src/types.rs @@ -22,10 +22,13 @@ use scale_info::TypeInfo; /// Stores the details of a fractionalized item. #[derive(Decode, Encode, Default, PartialEq, Eq, MaxEncodedLen, TypeInfo)] -pub struct Details { +pub struct Details { /// Minted asset. pub asset: AssetId, /// Number of fractions minted. pub fractions: Fractions, + + /// Fee reserved for creating a new asset. + pub fee: Fee, } From 9df16c3516cf6667373eb7cf8b668f69cb7a251f Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Wed, 8 Feb 2023 14:01:44 +0100 Subject: [PATCH 076/101] Use ReservableCurrency for fee deposit --- frame/nft-fractionalization/src/lib.rs | 22 +++++++--------------- frame/nft-fractionalization/src/types.rs | 4 +++- 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index 0582623d900cb..5a25a605741af 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -73,7 +73,7 @@ pub mod pallet { tokens::nonfungibles_v2::{ Inspect as NonFungiblesInspect, LockableNonfungible, Transfer, }, - Currency, ExistenceRequirement, + Currency, ExistenceRequirement, ReservableCurrency, }, PalletId, }; @@ -96,7 +96,7 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The currency mechanism, used for paying for deposits. 
- type Currency: Currency; + type Currency: ReservableCurrency; #[pallet::constant] type Fee: Get>; @@ -160,7 +160,7 @@ pub mod pallet { _, Blake2_128Concat, (T::NftCollectionId, T::NftId), - Details, AssetBalanceOf, FeeDepositOf>, + Details, AssetBalanceOf, FeeDepositOf, T::AccountId>, OptionQuery, >; @@ -228,7 +228,7 @@ pub mod pallet { let pallet_account = Self::get_pallet_account(); let fee = T::Fee::get(); - Self::do_reserve_fee(&nft_owner, pallet_account.clone(), fee)?; + T::Currency::reserve(&nft_owner, fee)?; Self::do_lock_nft(nft_collection_id, nft_id)?; Self::do_create_asset(asset_id, pallet_account.clone())?; Self::do_mint_asset(asset_id, &beneficiary, fractions)?; @@ -236,7 +236,7 @@ pub mod pallet { NftToAsset::::insert( (nft_collection_id, nft_id), - Details { asset: asset_id, fractions, fee }, + Details { asset: asset_id, fractions, asset_creator: nft_owner, fee }, ); Self::deposit_event(Event::NftFractionalized { @@ -278,11 +278,11 @@ pub mod pallet { let details = maybe_details.take().ok_or(Error::::DataNotFound)?; ensure!(details.asset == asset_id, Error::::DataNotFound); - let pallet_account = Self::get_pallet_account(); let fee = details.fee; + let asset_creator = details.asset_creator; Self::do_burn_asset(asset_id, &who, details.fractions)?; Self::do_unlock_nft(nft_collection_id, nft_id, &beneficiary)?; - Self::do_unreserve_fee(&who, pallet_account, fee)?; + T::Currency::unreserve(&asset_creator, fee); Self::deposit_event(Event::NftUnified { nft_collection: nft_collection_id, @@ -305,14 +305,6 @@ pub mod pallet { T::PalletId::get().into_account_truncating() } - fn do_reserve_fee(account: &T::AccountId, pallet_account: T::AccountId, fee: FeeDepositOf) -> DispatchResult { - T::Currency::transfer(account, &pallet_account, fee, ExistenceRequirement::KeepAlive) - } - - fn do_unreserve_fee(account: &T::AccountId, pallet_account: T::AccountId, fee: FeeDepositOf) -> DispatchResult { - T::Currency::transfer(&pallet_account, account, fee, 
ExistenceRequirement::KeepAlive) - } - /// Transfer the NFT from the account holding that NFT to the pallet's account. fn do_lock_nft(nft_collection_id: T::NftCollectionId, nft_id: T::NftId) -> DispatchResult { T::NftLocker::lock(&nft_collection_id, &nft_id) diff --git a/frame/nft-fractionalization/src/types.rs b/frame/nft-fractionalization/src/types.rs index 8cf5187c0b4cc..d3d932b0b60fd 100644 --- a/frame/nft-fractionalization/src/types.rs +++ b/frame/nft-fractionalization/src/types.rs @@ -22,7 +22,7 @@ use scale_info::TypeInfo; /// Stores the details of a fractionalized item. #[derive(Decode, Encode, Default, PartialEq, Eq, MaxEncodedLen, TypeInfo)] -pub struct Details { +pub struct Details { /// Minted asset. pub asset: AssetId, @@ -31,4 +31,6 @@ pub struct Details { /// Fee reserved for creating a new asset. pub fee: Fee, + + pub asset_creator: AccountId, } From ea81d247403bd3df42e983c1a9c201761f57c7e8 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 13 Feb 2023 16:36:14 +0200 Subject: [PATCH 077/101] Improvements --- bin/node/runtime/src/lib.rs | 2 +- .../nft-fractionalization/src/benchmarking.rs | 2 + frame/nft-fractionalization/src/lib.rs | 51 +++++++++---------- frame/nft-fractionalization/src/mock.rs | 1 + frame/nft-fractionalization/src/tests.rs | 2 + frame/nft-fractionalization/src/types.rs | 7 +-- 6 files changed, 34 insertions(+), 31 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index a733145f3070a..a3e1e48cdb849 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1629,7 +1629,7 @@ impl LockableNonfungible for RuntimeLockableNonfungible { impl pallet_nft_fractionalization::Config for Runtime { type RuntimeEvent = RuntimeEvent; type PalletId = NftFractionsPalletId; - type Fee = AssetDeposit; + type Deposit = AssetDeposit; type Currency = Balances; type NftCollectionId = ::CollectionId; type NftId = ::ItemId; diff --git a/frame/nft-fractionalization/src/benchmarking.rs 
b/frame/nft-fractionalization/src/benchmarking.rs index 7984d817148af..5be385f774f20 100644 --- a/frame/nft-fractionalization/src/benchmarking.rs +++ b/frame/nft-fractionalization/src/benchmarking.rs @@ -81,6 +81,8 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { benchmarks! { where_clause { where + T::AssetBalance: From, + T::AssetId: From, T::NftCollectionId: From, T::NftId: From, T::Currency: Unbalanced, diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index 5a25a605741af..0133360eadfc1 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -56,15 +56,18 @@ pub use weights::WeightInfo; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; + use frame_support::{ + pallet_prelude::*, + traits::tokens::{AssetId, Balance as AssetBalance}, + }; use frame_system::pallet_prelude::*; - use sp_runtime::traits::{One, Zero}; use scale_info::prelude::format; + use sp_runtime::traits::{One, Zero}; use sp_std::fmt::Display; use frame_support::{ dispatch::DispatchResult, - sp_runtime::traits::{AccountIdConversion, AtLeast32BitUnsigned, StaticLookup}, + sp_runtime::traits::{AccountIdConversion, StaticLookup}, traits::{ fungibles::{ metadata::{CalcMetadataDeposit, Mutate as MutateMetadata}, @@ -82,7 +85,7 @@ pub mod pallet { <::Assets as Inspect<::AccountId>>::AssetId; pub type AssetBalanceOf = <::Assets as Inspect<::AccountId>>::Balance; - pub type FeeDepositOf = + pub type DepositOf = <::Currency as Currency<::AccountId>>::Balance; pub type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; @@ -99,7 +102,7 @@ pub mod pallet { type Currency: ReservableCurrency; #[pallet::constant] - type Fee: Get>; + type Deposit: Get>; /// Identifier for the collection of NFT. 
type NftCollectionId: Member + Parameter + MaxEncodedLen + Copy + Display; @@ -108,24 +111,10 @@ pub mod pallet { type NftId: Member + Parameter + MaxEncodedLen + Copy + Display; /// The type used to describe the amount of fractions converted into assets. - type AssetBalance: AtLeast32BitUnsigned - + codec::FullCodec - + Copy - + MaybeSerializeDeserialize - + sp_std::fmt::Debug - + From - + TypeInfo - + MaxEncodedLen; + type AssetBalance: AssetBalance; /// The type used to identify the assets created during fractionalization. - type AssetId: Member - + Parameter - + Copy - + From - + MaybeSerializeDeserialize - + MaxEncodedLen - + PartialOrd - + TypeInfo; + type AssetId: AssetId; /// Registry for the minted assets. type Assets: Inspect @@ -160,7 +149,7 @@ pub mod pallet { _, Blake2_128Concat, (T::NftCollectionId, T::NftId), - Details, AssetBalanceOf, FeeDepositOf, T::AccountId>, + Details, AssetBalanceOf, DepositOf, T::AccountId>, OptionQuery, >; @@ -201,6 +190,8 @@ pub mod pallet { /// The dispatch origin for this call must be Signed. /// The origin must be the owner of the NFT they are trying to lock. /// + /// `Deposit` funds of sender are reserved. + /// /// - `nft_collection_id`: The ID used to identify the collection of the NFT. /// Is used within the context of `pallet_nfts`. /// - `nft_id`: The ID used to identify the NFT within the given collection. @@ -209,6 +200,8 @@ pub mod pallet { /// Is used within the context of `pallet_assets`. /// - `beneficiary`: The account that will receive the newly created asset. /// - `fractions`: The amount to be minted of the newly created asset. + /// + /// Emits `NftFractionalized` event when successful. 
#[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::fractionalize())] pub fn fractionalize( @@ -227,8 +220,8 @@ pub mod pallet { ensure!(nft_owner == who, Error::::NoPermission); let pallet_account = Self::get_pallet_account(); - let fee = T::Fee::get(); - T::Currency::reserve(&nft_owner, fee)?; + let deposit = T::Deposit::get(); + T::Currency::reserve(&nft_owner, deposit)?; Self::do_lock_nft(nft_collection_id, nft_id)?; Self::do_create_asset(asset_id, pallet_account.clone())?; Self::do_mint_asset(asset_id, &beneficiary, fractions)?; @@ -236,7 +229,7 @@ pub mod pallet { NftToAsset::::insert( (nft_collection_id, nft_id), - Details { asset: asset_id, fractions, asset_creator: nft_owner, fee }, + Details { asset: asset_id, fractions, asset_creator: nft_owner, deposit }, ); Self::deposit_event(Event::NftFractionalized { @@ -254,6 +247,8 @@ pub mod pallet { /// /// The dispatch origin for this call must be Signed. /// + /// `Deposit` funds will be returned to `asset_creator`. + /// /// - `nft_collection_id`: The ID used to identify the collection of the NFT. /// Is used within the context of `pallet_nfts`. /// - `nft_id`: The ID used to identify the NFT within the given collection. @@ -262,6 +257,8 @@ pub mod pallet { /// the original ID of the created asset, corresponding to the NFT. /// Is used within the context of `pallet_assets`. /// - `beneficiary`: The account that will receive the unified NFT. + /// + /// Emits `NftUnified` event when successful. 
#[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::unify())] pub fn unify( @@ -278,11 +275,11 @@ pub mod pallet { let details = maybe_details.take().ok_or(Error::::DataNotFound)?; ensure!(details.asset == asset_id, Error::::DataNotFound); - let fee = details.fee; + let deposit = details.deposit; let asset_creator = details.asset_creator; Self::do_burn_asset(asset_id, &who, details.fractions)?; Self::do_unlock_nft(nft_collection_id, nft_id, &beneficiary)?; - T::Currency::unreserve(&asset_creator, fee); + T::Currency::unreserve(&asset_creator, deposit); Self::deposit_event(Event::NftUnified { nft_collection: nft_collection_id, diff --git a/frame/nft-fractionalization/src/mock.rs b/frame/nft-fractionalization/src/mock.rs index d3c05137d8869..075e63fb8976c 100644 --- a/frame/nft-fractionalization/src/mock.rs +++ b/frame/nft-fractionalization/src/mock.rs @@ -185,6 +185,7 @@ impl LockableNonfungible for MockLockableNonfungible { impl Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; + type Deposit = ConstU64<1>; type NftCollectionId = ::CollectionId; type NftId = ::ItemId; type AssetBalance = ::Balance; diff --git a/frame/nft-fractionalization/src/tests.rs b/frame/nft-fractionalization/src/tests.rs index 431ff62106885..e9369f3a3af99 100644 --- a/frame/nft-fractionalization/src/tests.rs +++ b/frame/nft-fractionalization/src/tests.rs @@ -76,6 +76,7 @@ fn fractionalize_should_work() { )); assert_eq!(assets(), vec![asset_id]); assert_eq!(Assets::balance(asset_id, 2), fractions); + assert_eq!(Balances::reserved_balance(&1), 2); assert_eq!(String::from_utf8(Assets::name(0)).unwrap(), "Frac 0-0"); assert_eq!(String::from_utf8(Assets::symbol(0)).unwrap(), "FRAC"); assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(1)); @@ -190,6 +191,7 @@ fn unify_should_work() { )); assert_eq!(Assets::balance(asset_id, 2), 0); + assert_eq!(Balances::reserved_balance(&1), 1); assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(1)); 
assert!(!NftToAsset::::contains_key((&nft_collection_id, &nft_id))); diff --git a/frame/nft-fractionalization/src/types.rs b/frame/nft-fractionalization/src/types.rs index d3d932b0b60fd..2e9b338ac57bc 100644 --- a/frame/nft-fractionalization/src/types.rs +++ b/frame/nft-fractionalization/src/types.rs @@ -22,15 +22,16 @@ use scale_info::TypeInfo; /// Stores the details of a fractionalized item. #[derive(Decode, Encode, Default, PartialEq, Eq, MaxEncodedLen, TypeInfo)] -pub struct Details { +pub struct Details { /// Minted asset. pub asset: AssetId, /// Number of fractions minted. pub fractions: Fractions, - /// Fee reserved for creating a new asset. - pub fee: Fee, + /// Reserved deposit for creating a new asset. + pub deposit: Deposit, + /// Account that fractionalized an item. pub asset_creator: AccountId, } From e27331be6c8af648d551d9a1dfb61c4434974b62 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 13 Feb 2023 16:46:47 +0200 Subject: [PATCH 078/101] Revert fmt changes --- client/finality-grandpa/src/communication/gossip.rs | 10 ++++------ client/network/bitswap/src/lib.rs | 5 ++--- client/state-db/src/lib.rs | 5 ++--- 3 files changed, 8 insertions(+), 12 deletions(-) diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index ea9c06ac2dd47..cbcafc727d436 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -794,15 +794,14 @@ impl Inner { { let local_view = match self.local_view { None => return None, - Some(ref mut v) => { + Some(ref mut v) => if v.round == round { // Do not send neighbor packets out if `round` has not changed --- // such behavior is punishable. 
return None } else { v - } - }, + }, }; let set_id = local_view.set_id; @@ -828,7 +827,7 @@ impl Inner { { let local_view = match self.local_view { ref mut x @ None => x.get_or_insert(LocalView::new(set_id, Round(1))), - Some(ref mut v) => { + Some(ref mut v) => if v.set_id == set_id { let diff_authorities = self.authorities.iter().collect::>() != authorities.iter().collect::>(); @@ -846,8 +845,7 @@ impl Inner { return None } else { v - } - }, + }, }; local_view.update_set(set_id); diff --git a/client/network/bitswap/src/lib.rs b/client/network/bitswap/src/lib.rs index 3b26c56e7edaf..62a18b18c839d 100644 --- a/client/network/bitswap/src/lib.rs +++ b/client/network/bitswap/src/lib.rs @@ -127,9 +127,8 @@ impl BitswapRequestHandler { }; match pending_response.send(response) { - Ok(()) => { - trace!(target: LOG_TARGET, "Handled bitswap request from {peer}.",) - }, + Ok(()) => + trace!(target: LOG_TARGET, "Handled bitswap request from {peer}.",), Err(_) => debug!( target: LOG_TARGET, "Failed to handle light client request from {peer}: {}", diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index b68dbd4b2c221..1befd6dff3bc8 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -188,9 +188,8 @@ impl fmt::Debug for StateDbError { write!(f, "Too many sibling blocks at #{number} inserted"), Self::BlockAlreadyExists => write!(f, "Block already exists"), Self::Metadata(message) => write!(f, "Invalid metadata: {}", message), - Self::BlockUnavailable => { - write!(f, "Trying to get a block record from db while it is not commit to db yet") - }, + Self::BlockUnavailable => + write!(f, "Trying to get a block record from db while it is not commit to db yet"), Self::BlockMissing => write!(f, "Block record is missing from the pruning window"), } } From 4a13b272aca298e7224bbda9404f71a7ae69de9f Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 13 Feb 2023 16:50:22 +0200 Subject: [PATCH 079/101] A bit more cleanup --- 
.../procedural/src/pallet/expand/storage.rs | 20 ++++++++----------- primitives/arithmetic/src/per_things.rs | 10 ++++------ 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index 6ee5dc341ca8c..ac0cff59b2118 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -374,11 +374,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => { + QueryKind::ResultQuery(error_path, _) => quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ) - }, + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -398,11 +397,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => { + QueryKind::ResultQuery(error_path, _) => quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ) - }, + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -424,11 +422,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => { + QueryKind::ResultQuery(error_path, _) => quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ) - }, + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -450,11 +447,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - 
QueryKind::ResultQuery(error_path, _) => { + QueryKind::ResultQuery(error_path, _) => quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ) - }, + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index 7454f221b152f..fc3767761175c 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -524,18 +524,16 @@ where rem_mul_div_inner += 1.into(); } }, - Rounding::NearestPrefDown => { + Rounding::NearestPrefDown => if rem_mul_upper % denom_upper > denom_upper / 2.into() { // `rem * numer / denom` is less than `numer`, so this will not overflow. rem_mul_div_inner += 1.into(); - } - }, - Rounding::NearestPrefUp => { + }, + Rounding::NearestPrefUp => if rem_mul_upper % denom_upper >= denom_upper / 2.into() + denom_upper % 2.into() { // `rem * numer / denom` is less than `numer`, so this will not overflow. rem_mul_div_inner += 1.into(); - } - }, + }, } rem_mul_div_inner.into() } From bd619d22d9845703f22616e9d59c4ecc8672bb12 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 13 Feb 2023 16:57:34 +0200 Subject: [PATCH 080/101] Consistent naming --- bin/node/runtime/src/lib.rs | 8 ++--- .../nft-fractionalization/src/benchmarking.rs | 6 ++-- frame/nft-fractionalization/src/mock.rs | 6 ++-- frame/nft-fractionalization/src/tests.rs | 36 ++++++++++++------- 4 files changed, 34 insertions(+), 22 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index a3e1e48cdb849..ada010c1781ca 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1608,7 +1608,7 @@ impl pallet_nfts::Config for Runtime { } parameter_types! 
{ - pub const NftFractionsPalletId: PalletId = PalletId(*b"fraction"); + pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); } pub struct RuntimeLockableNonfungible; @@ -1628,7 +1628,7 @@ impl LockableNonfungible for RuntimeLockableNonfungible { impl pallet_nft_fractionalization::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type PalletId = NftFractionsPalletId; + type PalletId = NftFractionalizationPalletId; type Deposit = AssetDeposit; type Currency = Balances; type NftCollectionId = ::CollectionId; @@ -1800,7 +1800,7 @@ construct_runtime!( Nis: pallet_nis, Uniques: pallet_uniques, Nfts: pallet_nfts, - NftFractions: pallet_nft_fractionalization, + NftFractionalization: pallet_nft_fractionalization, TransactionStorage: pallet_transaction_storage, VoterList: pallet_bags_list::, StateTrieMigration: pallet_state_trie_migration, @@ -1931,7 +1931,7 @@ mod benches { [pallet_treasury, Treasury] [pallet_uniques, Uniques] [pallet_nfts, Nfts] - [pallet_nft_fractionalization, NftFractions] + [pallet_nft_fractionalization, NftFractionalization] [pallet_utility, Utility] [pallet_vesting, Vesting] [pallet_whitelist, Whitelist] diff --git a/frame/nft-fractionalization/src/benchmarking.rs b/frame/nft-fractionalization/src/benchmarking.rs index 5be385f774f20..e9137b469a91a 100644 --- a/frame/nft-fractionalization/src/benchmarking.rs +++ b/frame/nft-fractionalization/src/benchmarking.rs @@ -33,7 +33,7 @@ use pallet_nfts::{CollectionConfig, CollectionSettings, ItemConfig, MintSettings use sp_runtime::traits::{Bounded, StaticLookup}; use sp_std::prelude::*; -use crate::Pallet as NftFractions; +use crate::Pallet as NftFractionalization; type BalanceOf = <::Currency as InspectFungible<::AccountId>>::Balance; @@ -107,7 +107,7 @@ benchmarks! 
{ unify { let (caller, caller_lookup) = mint_nft::(0.into()); - NftFractions::::fractionalize( + NftFractionalization::::fractionalize( SystemOrigin::Signed(caller.clone()).into(), 0.into(), 0.into(), @@ -127,5 +127,5 @@ benchmarks! { ); } - impl_benchmark_test_suite!(NftFractions, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(NftFractionalization, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/frame/nft-fractionalization/src/mock.rs b/frame/nft-fractionalization/src/mock.rs index 075e63fb8976c..452c28f85dceb 100644 --- a/frame/nft-fractionalization/src/mock.rs +++ b/frame/nft-fractionalization/src/mock.rs @@ -54,7 +54,7 @@ construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system, - NftFractions: pallet_nft_fractionalization, + NftFractionalization: pallet_nft_fractionalization, Assets: pallet_assets, Balances: pallet_balances, Nfts: pallet_nfts, @@ -164,7 +164,7 @@ impl pallet_nfts::Config for Test { } parameter_types! 
{ - pub const NftFractionsPalletId: PalletId = PalletId(*b"fraction"); + pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); } pub struct MockLockableNonfungible; @@ -192,7 +192,7 @@ impl Config for Test { type AssetId = ::AssetId; type Assets = Assets; type Nfts = Nfts; - type PalletId = NftFractionsPalletId; + type PalletId = NftFractionalizationPalletId; type NftLocker = MockLockableNonfungible; type WeightInfo = (); } diff --git a/frame/nft-fractionalization/src/tests.rs b/frame/nft-fractionalization/src/tests.rs index e9369f3a3af99..6b9ec7495c8cc 100644 --- a/frame/nft-fractionalization/src/tests.rs +++ b/frame/nft-fractionalization/src/tests.rs @@ -39,7 +39,7 @@ fn events() -> Vec> { .into_iter() .map(|r| r.event) .filter_map(|e| { - if let mock::RuntimeEvent::NftFractions(inner) = e { + if let mock::RuntimeEvent::NftFractionalization(inner) = e { Some(inner) } else { None @@ -66,7 +66,7 @@ fn fractionalize_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, CollectionConfig::default())); assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), nft_collection_id, nft_id, 1, None)); - assert_ok!(NftFractions::fractionalize( + assert_ok!(NftFractionalization::fractionalize( RuntimeOrigin::signed(1), nft_collection_id, nft_id, @@ -103,7 +103,7 @@ fn fractionalize_should_work() { let nft_id = nft_id + 1; assert_noop!( - NftFractions::fractionalize( + NftFractionalization::fractionalize( RuntimeOrigin::signed(1), nft_collection_id, nft_id, @@ -116,7 +116,7 @@ fn fractionalize_should_work() { assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), nft_collection_id, nft_id, 2, None)); assert_noop!( - NftFractions::fractionalize( + NftFractionalization::fractionalize( RuntimeOrigin::signed(1), nft_collection_id, nft_id, @@ -142,7 +142,7 @@ fn unify_should_work() { assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, CollectionConfig::default())); assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), nft_collection_id, nft_id, 1, None)); - 
assert_ok!(NftFractions::fractionalize( + assert_ok!(NftFractionalization::fractionalize( RuntimeOrigin::signed(1), nft_collection_id, nft_id, @@ -152,7 +152,7 @@ fn unify_should_work() { )); assert_noop!( - NftFractions::unify( + NftFractionalization::unify( RuntimeOrigin::signed(2), nft_collection_id + 1, nft_id, @@ -162,7 +162,7 @@ fn unify_should_work() { Error::::DataNotFound ); assert_noop!( - NftFractions::unify( + NftFractionalization::unify( RuntimeOrigin::signed(2), nft_collection_id, nft_id, @@ -174,7 +174,13 @@ fn unify_should_work() { // can't unify the asset a user doesn't hold assert_noop!( - NftFractions::unify(RuntimeOrigin::signed(1), nft_collection_id, nft_id, asset_id, 1), + NftFractionalization::unify( + RuntimeOrigin::signed(1), + nft_collection_id, + nft_id, + asset_id, + 1 + ), DispatchError::Module(ModuleError { index: 2, error: [1, 0, 0, 0], @@ -182,7 +188,7 @@ fn unify_should_work() { }) ); - assert_ok!(NftFractions::unify( + assert_ok!(NftFractionalization::unify( RuntimeOrigin::signed(2), nft_collection_id, nft_id, @@ -204,7 +210,7 @@ fn unify_should_work() { // validate we need to hold the full balance to un-fractionalize the NFT let asset_id = asset_id + 1; - assert_ok!(NftFractions::fractionalize( + assert_ok!(NftFractionalization::fractionalize( RuntimeOrigin::signed(1), nft_collection_id, nft_id, @@ -216,7 +222,13 @@ fn unify_should_work() { assert_eq!(Assets::balance(asset_id, 1), fractions - 1); assert_eq!(Assets::balance(asset_id, 2), 1); assert_noop!( - NftFractions::unify(RuntimeOrigin::signed(1), nft_collection_id, nft_id, asset_id, 1), + NftFractionalization::unify( + RuntimeOrigin::signed(1), + nft_collection_id, + nft_id, + asset_id, + 1 + ), DispatchError::Module(ModuleError { index: 2, error: [0, 0, 0, 0], @@ -225,7 +237,7 @@ fn unify_should_work() { ); assert_ok!(Assets::transfer(RuntimeOrigin::signed(2), asset_id, 1, 1)); - assert_ok!(NftFractions::unify( + assert_ok!(NftFractionalization::unify( 
RuntimeOrigin::signed(1), nft_collection_id, nft_id, From 98236e1694714fe62ad025497f559d9f74f4a620 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 13 Feb 2023 17:41:53 +0200 Subject: [PATCH 081/101] Make it more generic --- bin/node/runtime/src/lib.rs | 6 +++++- frame/nft-fractionalization/src/lib.rs | 15 ++++++++++++--- frame/nft-fractionalization/src/mock.rs | 6 +++++- 3 files changed, 22 insertions(+), 5 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index ada010c1781ca..0b218b70d41e3 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1609,6 +1609,8 @@ impl pallet_nfts::Config for Runtime { parameter_types! { pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); + pub NewAssetSymbol: String = "FRAC".to_string(); + pub NewAssetName: String = "Frac".to_string(); } pub struct RuntimeLockableNonfungible; @@ -1628,9 +1630,10 @@ impl LockableNonfungible for RuntimeLockableNonfungible { impl pallet_nft_fractionalization::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type PalletId = NftFractionalizationPalletId; type Deposit = AssetDeposit; type Currency = Balances; + type NewAssetSymbol = NewAssetSymbol; + type NewAssetName = NewAssetName; type NftCollectionId = ::CollectionId; type NftId = ::ItemId; type AssetBalance = ::Balance; @@ -1638,6 +1641,7 @@ impl pallet_nft_fractionalization::Config for Runtime { type Assets = Assets; type Nfts = Nfts; type NftLocker = RuntimeLockableNonfungible; + type PalletId = NftFractionalizationPalletId; type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; } diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index 0133360eadfc1..af3045a69fb21 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -138,6 +138,14 @@ pub mod pallet { #[pallet::constant] type PalletId: Get; + /// The newly created asset's symbol. 
+ #[pallet::constant] + type NewAssetSymbol: Get; + + /// The newly created asset's name. + #[pallet::constant] + type NewAssetName: Get; + /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } @@ -349,9 +357,10 @@ pub mod pallet { nft_collection_id: &T::NftCollectionId, nft_id: &T::NftId, ) -> DispatchResult { - let symbol = "FRAC"; - let name = format!("Frac {nft_collection_id}-{nft_id}"); - let deposit = T::Assets::calc(&name.clone().into(), &symbol.into()); + let symbol = T::NewAssetSymbol::get(); + let name = T::NewAssetName::get(); + let name = format!("{} {}-{}", name, nft_collection_id, nft_id); + let deposit = T::Assets::calc(&name.clone().into(), &symbol.clone().into()); if deposit != Zero::zero() { T::Currency::transfer( &depositor, diff --git a/frame/nft-fractionalization/src/mock.rs b/frame/nft-fractionalization/src/mock.rs index 452c28f85dceb..d33034daa0fea 100644 --- a/frame/nft-fractionalization/src/mock.rs +++ b/frame/nft-fractionalization/src/mock.rs @@ -165,6 +165,8 @@ impl pallet_nfts::Config for Test { parameter_types! 
{ pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); + pub NewAssetSymbol: String = "FRAC".to_string(); + pub NewAssetName: String = "Frac".to_string(); } pub struct MockLockableNonfungible; @@ -184,8 +186,10 @@ impl LockableNonfungible for MockLockableNonfungible { impl Config for Test { type RuntimeEvent = RuntimeEvent; - type Currency = Balances; type Deposit = ConstU64<1>; + type Currency = Balances; + type NewAssetSymbol = NewAssetSymbol; + type NewAssetName = NewAssetName; type NftCollectionId = ::CollectionId; type NftId = ::ItemId; type AssetBalance = ::Balance; From caccb147b5ad0cb68906cf150b3be716b56ef69a Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Mon, 13 Feb 2023 18:13:44 +0200 Subject: [PATCH 082/101] Leftover --- frame/support/procedural/src/pallet/expand/storage.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index ac0cff59b2118..195a62431f279 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -474,11 +474,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => { + QueryKind::ResultQuery(error_path, _) => quote::quote_spanned!(storage.attr_span => Result<#value, #error_path> - ) - }, + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => From 96ec9a3f3df99687312164a84d4670724d344992 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Tue, 14 Feb 2023 11:42:43 +0200 Subject: [PATCH 083/101] Use Vec instead of String --- bin/node/runtime/src/lib.rs | 4 ++-- frame/nft-fractionalization/src/lib.rs | 10 +++++----- frame/nft-fractionalization/src/mock.rs | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git 
a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 0b218b70d41e3..5b5394c63950e 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1609,8 +1609,8 @@ impl pallet_nfts::Config for Runtime { parameter_types! { pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); - pub NewAssetSymbol: String = "FRAC".to_string(); - pub NewAssetName: String = "Frac".to_string(); + pub NewAssetSymbol: Vec = (*b"FRAC").into(); + pub NewAssetName: Vec = (*b"Frac").into(); } pub struct RuntimeLockableNonfungible; diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index af3045a69fb21..32397a5bae9bb 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -61,9 +61,9 @@ pub mod pallet { traits::tokens::{AssetId, Balance as AssetBalance}, }; use frame_system::pallet_prelude::*; - use scale_info::prelude::format; + use scale_info::prelude::{format, string::String}; use sp_runtime::traits::{One, Zero}; - use sp_std::fmt::Display; + use sp_std::{fmt::Display, prelude::*}; use frame_support::{ dispatch::DispatchResult, @@ -140,11 +140,11 @@ pub mod pallet { /// The newly created asset's symbol. #[pallet::constant] - type NewAssetSymbol: Get; + type NewAssetSymbol: Get>; /// The newly created asset's name. #[pallet::constant] - type NewAssetName: Get; + type NewAssetName: Get>; /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; @@ -359,7 +359,7 @@ pub mod pallet { ) -> DispatchResult { let symbol = T::NewAssetSymbol::get(); let name = T::NewAssetName::get(); - let name = format!("{} {}-{}", name, nft_collection_id, nft_id); + let name = format!("{} {nft_collection_id}-{nft_id}", String::from_utf8_lossy(&name)); let deposit = T::Assets::calc(&name.clone().into(), &symbol.clone().into()); if deposit != Zero::zero() { T::Currency::transfer( diff --git a/frame/nft-fractionalization/src/mock.rs b/frame/nft-fractionalization/src/mock.rs index d33034daa0fea..a378348f9b838 100644 --- a/frame/nft-fractionalization/src/mock.rs +++ b/frame/nft-fractionalization/src/mock.rs @@ -165,8 +165,8 @@ impl pallet_nfts::Config for Test { parameter_types! { pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); - pub NewAssetSymbol: String = "FRAC".to_string(); - pub NewAssetName: String = "Frac".to_string(); + pub NewAssetSymbol: Vec = (*b"FRAC").into(); + pub NewAssetName: Vec = (*b"Frac").into(); } pub struct MockLockableNonfungible; From ed16d143217f637c6367dc5cd993af4dfedf8a43 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Fri, 24 Feb 2023 16:55:02 +0200 Subject: [PATCH 084/101] Update to the latest + improve the Locker trait --- bin/node/runtime/src/lib.rs | 35 ++--- frame/nft-fractionalization/src/lib.rs | 14 +- frame/nft-fractionalization/src/mock.rs | 56 ++++---- frame/nft-fractionalization/src/tests.rs | 127 +++++++++++------- frame/support/src/traits/tokens/misc.rs | 22 ++- .../src/traits/tokens/nonfungibles_v2.rs | 8 -- 6 files changed, 148 insertions(+), 114 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 8861c7e636daf..a0b6f07efb951 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -33,10 +33,7 @@ use frame_support::{ parameter_types, traits::{ fungible::ItemOf, - tokens::{ - nonfungibles_v2::{Inspect, LockableNonfungible, Mutate}, - AttributeNamespace, - }, + 
tokens::nonfungibles_v2::{Inspect, Mutate}, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, LockIdentifier, Locker, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, @@ -1587,7 +1584,18 @@ type CollectionId = ::CollectionId; pub struct NftLocker; impl Locker for NftLocker { fn is_locked(collection: CollectionId, item: ItemId) -> bool { - Nfts::attribute(&collection, &item, &AttributeNamespace::Pallet, LOCKED_NFT_KEY).is_some() + >::system_attribute(&collection, &item, LOCKED_NFT_KEY).is_some() + } + fn lock(collection: &CollectionId, item: &ItemId) -> DispatchResult { + >::set_attribute( + collection, + item, + LOCKED_NFT_KEY, + &[1], + ) + } + fn unlock(collection: &CollectionId, item: &ItemId) -> DispatchResult { + >::clear_attribute(collection, item, LOCKED_NFT_KEY) } } @@ -1626,21 +1634,6 @@ parameter_types! { pub NewAssetName: Vec = (*b"Frac").into(); } -pub struct RuntimeLockableNonfungible; -impl LockableNonfungible for RuntimeLockableNonfungible { - fn lock(collection: &CollectionId, item: &ItemId) -> DispatchResult { - >::set_attribute( - collection, - item, - LOCKED_NFT_KEY, - &[1], - ) - } - fn unlock(collection: &CollectionId, item: &ItemId) -> DispatchResult { - >::clear_attribute(collection, item, LOCKED_NFT_KEY) - } -} - impl pallet_nft_fractionalization::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Deposit = AssetDeposit; @@ -1653,7 +1646,7 @@ impl pallet_nft_fractionalization::Config for Runtime { type AssetId = ::AssetId; type Assets = Assets; type Nfts = Nfts; - type NftLocker = RuntimeLockableNonfungible; + type NftLocker = NftLocker; type PalletId = NftFractionalizationPalletId; type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; } diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index 32397a5bae9bb..92b02d80784f5 100644 --- 
a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -56,10 +56,6 @@ pub use weights::WeightInfo; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{ - pallet_prelude::*, - traits::tokens::{AssetId, Balance as AssetBalance}, - }; use frame_system::pallet_prelude::*; use scale_info::prelude::{format, string::String}; use sp_runtime::traits::{One, Zero}; @@ -67,16 +63,18 @@ pub mod pallet { use frame_support::{ dispatch::DispatchResult, + pallet_prelude::*, sp_runtime::traits::{AccountIdConversion, StaticLookup}, traits::{ fungibles::{ metadata::{CalcMetadataDeposit, Mutate as MutateMetadata}, Create, Destroy, Inspect, Mutate, }, - tokens::nonfungibles_v2::{ - Inspect as NonFungiblesInspect, LockableNonfungible, Transfer, + tokens::{ + nonfungibles_v2::{Inspect as NonFungiblesInspect, Transfer}, + AssetId, Balance as AssetBalance, }, - Currency, ExistenceRequirement, ReservableCurrency, + Currency, ExistenceRequirement, Locker, ReservableCurrency, }, PalletId, }; @@ -132,7 +130,7 @@ pub mod pallet { > + Transfer; /// Locker trait to enable NFT's locking. - type NftLocker: LockableNonfungible; + type NftLocker: Locker; /// The pallet's id, used for deriving its sovereign account ID. 
#[pallet::constant] diff --git a/frame/nft-fractionalization/src/mock.rs b/frame/nft-fractionalization/src/mock.rs index a378348f9b838..bf493edfd51cd 100644 --- a/frame/nft-fractionalization/src/mock.rs +++ b/frame/nft-fractionalization/src/mock.rs @@ -23,10 +23,7 @@ use crate as pallet_nft_fractionalization; use frame_support::{ construct_runtime, parameter_types, traits::{ - tokens::{ - nonfungibles_v2::{Inspect, LockableNonfungible, Mutate}, - AttributeNamespace, - }, + tokens::nonfungibles_v2::{Inspect, Mutate}, AsEnsureOriginWithArg, ConstU32, ConstU64, Locker, }, PalletId, @@ -36,13 +33,15 @@ use pallet_nfts::{ItemConfig, PalletFeatures}; use sp_core::H256; use sp_runtime::{ testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - DispatchResult, + traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, + DispatchResult, MultiSignature, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; -type AccountId = ::AccountId; +type Signature = MultiSignature; +type AccountPublic = ::Signer; +type AccountId = ::AccountId; type ItemId = ::ItemId; type CollectionId = ::CollectionId; @@ -70,7 +69,7 @@ impl frame_system::Config for Test { type BlockNumber = u64; type Hash = H256; type Hashing = BlakeTwo256; - type AccountId = u64; + type AccountId = AccountId; type Lookup = IdentityLookup; type Header = Header; type RuntimeEvent = RuntimeEvent; @@ -106,8 +105,8 @@ impl pallet_assets::Config for Test { type AssetId = u32; type AssetIdParameter = u32; type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; - type ForceOrigin = frame_system::EnsureRoot; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; type AssetDeposit = ConstU64<1>; type AssetAccountDeposit = ConstU64<10>; type MetadataDepositBase = ConstU64<1>; @@ -132,7 +131,18 @@ const LOCKED_NFT_KEY: &[u8; 6] = b"locked"; pub struct TestLocker; impl Locker for TestLocker { fn 
is_locked(collection: CollectionId, item: ItemId) -> bool { - Nfts::attribute(&collection, &item, &AttributeNamespace::Pallet, LOCKED_NFT_KEY).is_some() + >::system_attribute(&collection, &item, LOCKED_NFT_KEY).is_some() + } + fn lock(collection: &CollectionId, item: &ItemId) -> DispatchResult { + >::set_attribute( + collection, + item, + LOCKED_NFT_KEY, + &[1], + ) + } + fn unlock(collection: &CollectionId, item: &ItemId) -> DispatchResult { + >::clear_attribute(collection, item, LOCKED_NFT_KEY) } } @@ -141,8 +151,8 @@ impl pallet_nfts::Config for Test { type CollectionId = u32; type ItemId = u32; type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; - type ForceOrigin = frame_system::EnsureRoot; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; type Locker = TestLocker; type CollectionDeposit = ConstU64<2>; type ItemDeposit = ConstU64<1>; @@ -156,7 +166,10 @@ impl pallet_nfts::Config for Test { type ItemAttributesApprovalsLimit = ConstU32<2>; type MaxTips = ConstU32<10>; type MaxDeadlineDuration = ConstU64<10000>; + type MaxAttributesPerCall = ConstU32<2>; type Features = Features; + type OffchainSignature = Signature; + type OffchainPublic = AccountPublic; type WeightInfo = (); pallet_nfts::runtime_benchmarks_enabled! { type Helper = (); @@ -169,21 +182,6 @@ parameter_types! 
{ pub NewAssetName: Vec = (*b"Frac").into(); } -pub struct MockLockableNonfungible; -impl LockableNonfungible for MockLockableNonfungible { - fn lock(collection: &CollectionId, item: &ItemId) -> DispatchResult { - >::set_attribute( - collection, - item, - LOCKED_NFT_KEY, - &[1], - ) - } - fn unlock(collection: &CollectionId, item: &ItemId) -> DispatchResult { - >::clear_attribute(collection, item, LOCKED_NFT_KEY) - } -} - impl Config for Test { type RuntimeEvent = RuntimeEvent; type Deposit = ConstU64<1>; @@ -197,7 +195,7 @@ impl Config for Test { type Assets = Assets; type Nfts = Nfts; type PalletId = NftFractionalizationPalletId; - type NftLocker = MockLockableNonfungible; + type NftLocker = TestLocker; type WeightInfo = (); } diff --git a/frame/nft-fractionalization/src/tests.rs b/frame/nft-fractionalization/src/tests.rs index 6b9ec7495c8cc..98a7c6271191b 100644 --- a/frame/nft-fractionalization/src/tests.rs +++ b/frame/nft-fractionalization/src/tests.rs @@ -52,6 +52,12 @@ fn events() -> Vec> { result } +type AccountIdOf = ::AccountId; + +fn account(id: u8) -> AccountIdOf { + [id; 32].into() +} + #[test] fn fractionalize_should_work() { new_test_ext().execute_with(|| { @@ -60,28 +66,43 @@ fn fractionalize_should_work() { let asset_id = 0; let fractions = 1000; - Balances::make_free_balance_be(&1, 100); - Balances::make_free_balance_be(&2, 100); + Balances::make_free_balance_be(&account(1), 100); + Balances::make_free_balance_be(&account(2), 100); - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, CollectionConfig::default())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), nft_collection_id, nft_id, 1, None)); + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + account(1), + CollectionConfig::default(), + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + account(1), + None, + )); assert_ok!(NftFractionalization::fractionalize( - RuntimeOrigin::signed(1), + RuntimeOrigin::signed(account(1)), 
nft_collection_id, nft_id, asset_id, - 2, + account(2), fractions, )); assert_eq!(assets(), vec![asset_id]); - assert_eq!(Assets::balance(asset_id, 2), fractions); - assert_eq!(Balances::reserved_balance(&1), 2); + assert_eq!(Assets::balance(asset_id, account(2)), fractions); + assert_eq!(Balances::reserved_balance(&account(1)), 2); assert_eq!(String::from_utf8(Assets::name(0)).unwrap(), "Frac 0-0"); assert_eq!(String::from_utf8(Assets::symbol(0)).unwrap(), "FRAC"); - assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(1)); + assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(account(1))); assert_noop!( - Nfts::transfer(RuntimeOrigin::signed(1), nft_collection_id, nft_id, 2), + Nfts::transfer( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + account(2), + ), DispatchError::Module(ModuleError { index: 4, error: [12, 0, 0, 0], @@ -98,30 +119,36 @@ fn fractionalize_should_work() { nft: nft_id, fractions, asset: asset_id, - beneficiary: 2, + beneficiary: account(2), })); let nft_id = nft_id + 1; assert_noop!( NftFractionalization::fractionalize( - RuntimeOrigin::signed(1), + RuntimeOrigin::signed(account(1)), nft_collection_id, nft_id, asset_id, - 2, + account(2), fractions, ), Error::::NftNotFound ); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), nft_collection_id, nft_id, 2, None)); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + account(2), + None + )); assert_noop!( NftFractionalization::fractionalize( - RuntimeOrigin::signed(1), + RuntimeOrigin::signed(account(1)), nft_collection_id, nft_id, asset_id, - 2, + account(2), fractions, ), Error::::NoPermission @@ -137,37 +164,47 @@ fn unify_should_work() { let asset_id = 0; let fractions = 1000; - Balances::make_free_balance_be(&1, 100); - Balances::make_free_balance_be(&2, 100); + Balances::make_free_balance_be(&account(1), 100); + Balances::make_free_balance_be(&account(2), 100); - assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, 
CollectionConfig::default())); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), nft_collection_id, nft_id, 1, None)); + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + account(1), + CollectionConfig::default(), + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + account(1), + None, + )); assert_ok!(NftFractionalization::fractionalize( - RuntimeOrigin::signed(1), + RuntimeOrigin::signed(account(1)), nft_collection_id, nft_id, asset_id, - 2, + account(2), fractions, )); assert_noop!( NftFractionalization::unify( - RuntimeOrigin::signed(2), + RuntimeOrigin::signed(account(2)), nft_collection_id + 1, nft_id, asset_id, - 1, + account(1), ), Error::::DataNotFound ); assert_noop!( NftFractionalization::unify( - RuntimeOrigin::signed(2), + RuntimeOrigin::signed(account(2)), nft_collection_id, nft_id, asset_id + 1, - 1, + account(1), ), Error::::DataNotFound ); @@ -175,11 +212,11 @@ fn unify_should_work() { // can't unify the asset a user doesn't hold assert_noop!( NftFractionalization::unify( - RuntimeOrigin::signed(1), + RuntimeOrigin::signed(account(1)), nft_collection_id, nft_id, asset_id, - 1 + account(1), ), DispatchError::Module(ModuleError { index: 2, @@ -189,45 +226,45 @@ fn unify_should_work() { ); assert_ok!(NftFractionalization::unify( - RuntimeOrigin::signed(2), + RuntimeOrigin::signed(account(2)), nft_collection_id, nft_id, asset_id, - 1, + account(1), )); - assert_eq!(Assets::balance(asset_id, 2), 0); - assert_eq!(Balances::reserved_balance(&1), 1); - assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(1)); + assert_eq!(Assets::balance(asset_id, account(2)), 0); + assert_eq!(Balances::reserved_balance(&account(1)), 1); + assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(account(1))); assert!(!NftToAsset::::contains_key((&nft_collection_id, &nft_id))); assert!(events().contains(&Event::::NftUnified { nft_collection: nft_collection_id, nft: nft_id, asset: asset_id, - beneficiary: 1, + 
beneficiary: account(1), })); // validate we need to hold the full balance to un-fractionalize the NFT let asset_id = asset_id + 1; assert_ok!(NftFractionalization::fractionalize( - RuntimeOrigin::signed(1), + RuntimeOrigin::signed(account(1)), nft_collection_id, nft_id, asset_id, - 1, + account(1), fractions, )); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), asset_id, 2, 1)); - assert_eq!(Assets::balance(asset_id, 1), fractions - 1); - assert_eq!(Assets::balance(asset_id, 2), 1); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(account(1)), asset_id, account(2), 1)); + assert_eq!(Assets::balance(asset_id, account(1)), fractions - 1); + assert_eq!(Assets::balance(asset_id, account(2)), 1); assert_noop!( NftFractionalization::unify( - RuntimeOrigin::signed(1), + RuntimeOrigin::signed(account(1)), nft_collection_id, nft_id, asset_id, - 1 + account(1), ), DispatchError::Module(ModuleError { index: 2, @@ -236,14 +273,14 @@ fn unify_should_work() { }) ); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(2), asset_id, 1, 1)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(account(2)), asset_id, account(1), 1)); assert_ok!(NftFractionalization::unify( - RuntimeOrigin::signed(1), + RuntimeOrigin::signed(account(1)), nft_collection_id, nft_id, asset_id, - 2, + account(2), )); - assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(2)); + assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(account(2))); }); } diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs index b4bd4640116a7..5cefa2048a3a1 100644 --- a/frame/support/src/traits/tokens/misc.rs +++ b/frame/support/src/traits/tokens/misc.rs @@ -20,7 +20,7 @@ use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; use sp_core::RuntimeDebug; -use sp_runtime::{ArithmeticError, DispatchError, TokenError}; +use sp_runtime::{ArithmeticError, DispatchError, DispatchResult, TokenError}; use 
sp_std::fmt::Debug; /// One of a number of consequences of withdrawing a fungible from an account. @@ -210,11 +210,15 @@ pub trait BalanceConversion { fn to_asset_balance(balance: InBalance, asset_id: AssetId) -> Result; } -/// Trait to handle asset locking mechanism to ensure interactions with the asset can be implemented -/// downstream to extend logic of Uniques current functionality. +/// Trait to handle NFT locking mechanism to ensure interactions with the asset can be implemented +/// downstream to extend logic of Uniques/Nfts current functionality. pub trait Locker { /// Check if the asset should be locked and prevent interactions with the asset from executing. fn is_locked(collection: CollectionId, item: ItemId) -> bool; + /// Lock `item` of `collection`. + fn lock(collection: &CollectionId, item: &ItemId) -> DispatchResult; + /// Unlock `item` of `collection`. + fn unlock(collection: &CollectionId, item: &ItemId) -> DispatchResult; } impl Locker for () { @@ -224,4 +228,16 @@ impl Locker for () { fn is_locked(_collection: CollectionId, _item: ItemId) -> bool { false } + /// Lock `item` of `collection`. + /// + /// By default, this is not a supported operation. + fn lock(_collection: &CollectionId, _item: &ItemId) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + /// Unlock `item` of `collection`. + /// + /// By default, this is not a supported operation. + fn unlock(_collection: &CollectionId, _item: &ItemId) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } } diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs index ca182c4f54790..5deb0c568f431 100644 --- a/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -339,11 +339,3 @@ pub trait Transfer: Inspect { destination: &AccountId, ) -> DispatchResult; } - -/// Trait for locking and unlocking non-fungible sets of items. 
-pub trait LockableNonfungible { - /// Lock `item` of `collection`. - fn lock(collection: &CollectionId, item: &ItemId) -> DispatchResult; - /// Unlock `item` of `collection`. - fn unlock(collection: &CollectionId, item: &ItemId) -> DispatchResult; -} From aabff98c9cd9f536421cc6284ff68012d61572b6 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Wed, 1 Mar 2023 12:40:57 +0200 Subject: [PATCH 085/101] Refactor NFTs locker --- bin/node/runtime/src/lib.rs | 27 +++++-------------- frame/nft-fractionalization/src/lib.rs | 9 +++---- frame/nft-fractionalization/src/mock.rs | 23 +++------------- frame/nfts/src/impl_nonfungibles.rs | 17 ++++++++++++ frame/nfts/src/lib.rs | 1 + frame/support/src/traits/tokens/misc.rs | 18 +------------ .../src/traits/tokens/nonfungible_v2.rs | 12 ++++++++- .../src/traits/tokens/nonfungibles_v2.rs | 16 ++++++++++- 8 files changed, 57 insertions(+), 66 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index a0b6f07efb951..d96a37b73fe2b 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -32,11 +32,10 @@ use frame_support::{ pallet_prelude::Get, parameter_types, traits::{ - fungible::ItemOf, - tokens::nonfungibles_v2::{Inspect, Mutate}, - AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Currency, EitherOfDiverse, - EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, - LockIdentifier, Locker, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, + fungible::ItemOf, tokens::nonfungibles_v2::Inspect, AsEnsureOriginWithArg, ConstBool, + ConstU128, ConstU16, ConstU32, Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, + Imbalance, InstanceFilter, KeyOwnerProofSystem, LockIdentifier, Locker, Nothing, + OnUnbalanced, U128CurrencyToVote, WithdrawReasons, }, weights::{ constants::{ @@ -57,7 +56,7 @@ use pallet_grandpa::{ fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, }; use 
pallet_im_online::sr25519::AuthorityId as ImOnlineId; -use pallet_nfts::{ItemConfig, PalletFeatures}; +use pallet_nfts::{PalletFeatures, LOCKED_NFT_KEY}; use pallet_nis::WithMaximumOf; use pallet_session::historical::{self as pallet_session_historical}; pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdjustment}; @@ -75,8 +74,7 @@ use sp_runtime::{ SaturatedConversion, StaticLookup, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, DispatchResult, FixedPointNumber, FixedU128, Perbill, Percent, Permill, - Perquintill, + ApplyExtrinsicResult, FixedPointNumber, FixedU128, Perbill, Percent, Permill, Perquintill, }; use sp_std::prelude::*; #[cfg(any(feature = "std", test))] @@ -1577,7 +1575,6 @@ parameter_types! { pub const MaxAttributesPerCall: u32 = 10; } -const LOCKED_NFT_KEY: &[u8; 6] = b"locked"; type ItemId = ::ItemId; type CollectionId = ::CollectionId; @@ -1586,17 +1583,6 @@ impl Locker for NftLocker { fn is_locked(collection: CollectionId, item: ItemId) -> bool { >::system_attribute(&collection, &item, LOCKED_NFT_KEY).is_some() } - fn lock(collection: &CollectionId, item: &ItemId) -> DispatchResult { - >::set_attribute( - collection, - item, - LOCKED_NFT_KEY, - &[1], - ) - } - fn unlock(collection: &CollectionId, item: &ItemId) -> DispatchResult { - >::clear_attribute(collection, item, LOCKED_NFT_KEY) - } } impl pallet_nfts::Config for Runtime { @@ -1646,7 +1632,6 @@ impl pallet_nft_fractionalization::Config for Runtime { type AssetId = ::AssetId; type Assets = Assets; type Nfts = Nfts; - type NftLocker = NftLocker; type PalletId = NftFractionalizationPalletId; type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; } diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index 92b02d80784f5..0134b95d22d30 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -74,7 +74,7 
@@ pub mod pallet { nonfungibles_v2::{Inspect as NonFungiblesInspect, Transfer}, AssetId, Balance as AssetBalance, }, - Currency, ExistenceRequirement, Locker, ReservableCurrency, + Currency, ExistenceRequirement, ReservableCurrency, }, PalletId, }; @@ -129,9 +129,6 @@ pub mod pallet { CollectionId = Self::NftCollectionId, > + Transfer; - /// Locker trait to enable NFT's locking. - type NftLocker: Locker; - /// The pallet's id, used for deriving its sovereign account ID. #[pallet::constant] type PalletId: Get; @@ -310,7 +307,7 @@ pub mod pallet { /// Transfer the NFT from the account holding that NFT to the pallet's account. fn do_lock_nft(nft_collection_id: T::NftCollectionId, nft_id: T::NftId) -> DispatchResult { - T::NftLocker::lock(&nft_collection_id, &nft_id) + T::Nfts::lock(&nft_collection_id, &nft_id) } /// Transfer the NFT to the account returning the tokens. @@ -319,7 +316,7 @@ pub mod pallet { nft_id: T::NftId, account: &T::AccountId, ) -> DispatchResult { - T::NftLocker::unlock(&nft_collection_id, &nft_id)?; + T::Nfts::unlock(&nft_collection_id, &nft_id)?; T::Nfts::transfer(&nft_collection_id, &nft_id, account) } diff --git a/frame/nft-fractionalization/src/mock.rs b/frame/nft-fractionalization/src/mock.rs index bf493edfd51cd..1e01c1098c242 100644 --- a/frame/nft-fractionalization/src/mock.rs +++ b/frame/nft-fractionalization/src/mock.rs @@ -22,19 +22,16 @@ use crate as pallet_nft_fractionalization; use frame_support::{ construct_runtime, parameter_types, - traits::{ - tokens::nonfungibles_v2::{Inspect, Mutate}, - AsEnsureOriginWithArg, ConstU32, ConstU64, Locker, - }, + traits::{tokens::nonfungibles_v2::Inspect, AsEnsureOriginWithArg, ConstU32, ConstU64, Locker}, PalletId, }; use frame_system::EnsureSigned; -use pallet_nfts::{ItemConfig, PalletFeatures}; +use pallet_nfts::{PalletFeatures, LOCKED_NFT_KEY}; use sp_core::H256; use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, - DispatchResult, 
MultiSignature, + MultiSignature, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -126,24 +123,11 @@ parameter_types! { pub storage Features: PalletFeatures = PalletFeatures::all_enabled(); } -const LOCKED_NFT_KEY: &[u8; 6] = b"locked"; - pub struct TestLocker; impl Locker for TestLocker { fn is_locked(collection: CollectionId, item: ItemId) -> bool { >::system_attribute(&collection, &item, LOCKED_NFT_KEY).is_some() } - fn lock(collection: &CollectionId, item: &ItemId) -> DispatchResult { - >::set_attribute( - collection, - item, - LOCKED_NFT_KEY, - &[1], - ) - } - fn unlock(collection: &CollectionId, item: &ItemId) -> DispatchResult { - >::clear_attribute(collection, item, LOCKED_NFT_KEY) - } } impl pallet_nfts::Config for Test { @@ -195,7 +179,6 @@ impl Config for Test { type Assets = Assets; type Nfts = Nfts; type PalletId = NftFractionalizationPalletId; - type NftLocker = TestLocker; type WeightInfo = (); } diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index ef6bbe7656ef8..8269825c64b7b 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -322,6 +322,23 @@ impl, I: 'static> Transfer for Pallet { ) -> DispatchResult { Self::do_transfer(*collection, *item, destination.clone(), |_, _| Ok(())) } + + fn lock(collection: &Self::CollectionId, item: &Self::ItemId) -> DispatchResult { + >::set_attribute( + collection, + item, + LOCKED_NFT_KEY, + &[1], + ) + } + + fn unlock(collection: &Self::CollectionId, item: &Self::ItemId) -> DispatchResult { + >::clear_attribute( + collection, + item, + LOCKED_NFT_KEY, + ) + } } impl, I: 'static> InspectEnumerable for Pallet { diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 2c3ab290cf569..c9f1a70c612df 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -61,6 +61,7 @@ pub use types::*; pub use weights::WeightInfo; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +pub const 
LOCKED_NFT_KEY: &[u8; 6] = b"locked"; #[frame_support::pallet] pub mod pallet { diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs index 5cefa2048a3a1..31c560b9b6e5c 100644 --- a/frame/support/src/traits/tokens/misc.rs +++ b/frame/support/src/traits/tokens/misc.rs @@ -20,7 +20,7 @@ use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; use sp_core::RuntimeDebug; -use sp_runtime::{ArithmeticError, DispatchError, DispatchResult, TokenError}; +use sp_runtime::{ArithmeticError, DispatchError, TokenError}; use sp_std::fmt::Debug; /// One of a number of consequences of withdrawing a fungible from an account. @@ -215,10 +215,6 @@ pub trait BalanceConversion { pub trait Locker { /// Check if the asset should be locked and prevent interactions with the asset from executing. fn is_locked(collection: CollectionId, item: ItemId) -> bool; - /// Lock `item` of `collection`. - fn lock(collection: &CollectionId, item: &ItemId) -> DispatchResult; - /// Unlock `item` of `collection`. - fn unlock(collection: &CollectionId, item: &ItemId) -> DispatchResult; } impl Locker for () { @@ -228,16 +224,4 @@ impl Locker for () { fn is_locked(_collection: CollectionId, _item: ItemId) -> bool { false } - /// Lock `item` of `collection`. - /// - /// By default, this is not a supported operation. - fn lock(_collection: &CollectionId, _item: &ItemId) -> DispatchResult { - Err(TokenError::Unsupported.into()) - } - /// Unlock `item` of `collection`. - /// - /// By default, this is not a supported operation. 
- fn unlock(_collection: &CollectionId, _item: &ItemId) -> DispatchResult { - Err(TokenError::Unsupported.into()) - } } diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs index 175d94324aaa4..118ed4313255a 100644 --- a/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -170,10 +170,14 @@ pub trait Mutate: Inspect { } } -/// Trait for transferring a non-fungible item. +/// Trait for transferring and locking/unlocking non-fungible sets of items. pub trait Transfer: Inspect { /// Transfer `item` into `destination` account. fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult; + /// Disable the `item` transfer. + fn lock(item: &Self::ItemId) -> DispatchResult; + /// Re-enable the `item` transfer. + fn unlock(item: &Self::ItemId) -> DispatchResult; } /// Convert a `nonfungibles` trait implementation into a `nonfungible` trait implementation by @@ -309,4 +313,10 @@ impl< fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult { >::transfer(&A::get(), item, destination) } + fn lock(item: &Self::ItemId) -> DispatchResult { + >::lock(&A::get(), item) + } + fn unlock(item: &Self::ItemId) -> DispatchResult { + >::unlock(&A::get(), item) + } } diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs index 5deb0c568f431..6119148b5f64e 100644 --- a/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -330,7 +330,7 @@ pub trait Mutate: Inspect { } } -/// Trait for transferring non-fungible sets of items. +/// Trait for transferring and locking/unlocking non-fungible sets of items. pub trait Transfer: Inspect { /// Transfer `item` of `collection` into `destination` account. 
fn transfer( @@ -338,4 +338,18 @@ pub trait Transfer: Inspect { item: &Self::ItemId, destination: &AccountId, ) -> DispatchResult; + + /// Disable the `item` of `collection` transfer. + /// + /// By default, this is not a supported operation. + fn lock(_collection: &Self::CollectionId, _item: &Self::ItemId) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Re-enable the `item` of `collection` transfer. + /// + /// By default, this is not a supported operation. + fn unlock(_collection: &Self::CollectionId, _item: &Self::ItemId) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } } From 447df48a565f0923766fef591070cccc5d0f7d9c Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Tue, 28 Mar 2023 16:19:58 +0200 Subject: [PATCH 086/101] Replace Vec with BoundedVec, add clearer errors --- bin/node/runtime/src/lib.rs | 7 +++-- frame/assets/src/functions.rs | 4 +-- frame/assets/src/impl_fungibles.rs | 6 ++-- frame/nft-fractionalization/src/lib.rs | 29 ++++++++++++------- .../src/traits/tokens/fungibles/metadata.rs | 2 +- 5 files changed, 28 insertions(+), 20 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index d96a37b73fe2b..e59852cc5daea 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -43,7 +43,7 @@ use frame_support::{ }, ConstantMultiplier, IdentityFee, Weight, }, - PalletId, RuntimeDebug, + BoundedVec, PalletId, RuntimeDebug, }; use frame_system::{ limits::{BlockLength, BlockWeights}, @@ -1616,8 +1616,8 @@ impl pallet_nfts::Config for Runtime { parameter_types! 
{ pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); - pub NewAssetSymbol: Vec = (*b"FRAC").into(); - pub NewAssetName: Vec = (*b"Frac").into(); + pub NewAssetSymbol: BoundedVec = (*b"FRAC").to_vec().try_into().unwrap(); + pub NewAssetName: BoundedVec = (*b"Frac").to_vec().try_into().unwrap(); } impl pallet_nft_fractionalization::Config for Runtime { @@ -1626,6 +1626,7 @@ impl pallet_nft_fractionalization::Config for Runtime { type Currency = Balances; type NewAssetSymbol = NewAssetSymbol; type NewAssetName = NewAssetName; + type StringLimit = StringLimit; type NftCollectionId = ::CollectionId; type NftId = ::ItemId; type AssetBalance = ::Balance; diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs index 639fe3b58240c..7a1ad409afd1f 100644 --- a/frame/assets/src/functions.rs +++ b/frame/assets/src/functions.rs @@ -925,8 +925,8 @@ impl, I: 'static> Pallet { /// Calculate the metadata deposit for the provided data. pub(super) fn calc_metadata_deposit( - name: &Vec, - symbol: &Vec, + name: &[u8], + symbol: &[u8], ) -> DepositBalanceOf { T::MetadataDepositPerByte::get() .saturating_mul(((name.len() + symbol.len()) as u32).into()) diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs index aa89fea7f3dc6..1248bb1d33605 100644 --- a/frame/assets/src/impl_fungibles.rs +++ b/frame/assets/src/impl_fungibles.rs @@ -237,10 +237,10 @@ impl, I: 'static> > for Pallet { fn calc( - name: &Vec, - symbol: &Vec, + name: &[u8], + symbol: &[u8], ) -> ::AccountId>>::Balance { - Self::calc_metadata_deposit(name, symbol) + Self::calc_metadata_deposit(&name, &symbol) } } diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index 0134b95d22d30..cea7770f0db72 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -76,7 +76,7 @@ pub mod pallet { }, Currency, ExistenceRequirement, ReservableCurrency, }, - PalletId, + BoundedVec, 
PalletId, }; pub type AssetIdOf = @@ -135,11 +135,15 @@ pub mod pallet { /// The newly created asset's symbol. #[pallet::constant] - type NewAssetSymbol: Get>; + type NewAssetSymbol: Get>; /// The newly created asset's name. #[pallet::constant] - type NewAssetName: Get>; + type NewAssetName: Get>; + + /// The maximum length of a name or symbol stored on-chain. + #[pallet::constant] + type StringLimit: Get; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; @@ -178,12 +182,14 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// Information about the fractionalized NFT can't be found. - DataNotFound, + /// Asset ID does not correspond to locked NFT. + IncorrectAssetId, /// The signing account has no permission to do the operation. NoPermission, /// NFT doesn't exist. NftNotFound, + /// NFT has not yet been fractionalised. + NftNotFractionalized, } #[pallet::call] @@ -275,8 +281,8 @@ pub mod pallet { let beneficiary = T::Lookup::lookup(beneficiary)?; NftToAsset::::try_mutate_exists((nft_collection_id, nft_id), |maybe_details| { - let details = maybe_details.take().ok_or(Error::::DataNotFound)?; - ensure!(details.asset == asset_id, Error::::DataNotFound); + let details = maybe_details.take().ok_or(Error::::NftNotFractionalized)?; + ensure!(details.asset == asset_id, Error::::IncorrectAssetId); let deposit = details.deposit; let asset_creator = details.asset_creator; @@ -352,10 +358,11 @@ pub mod pallet { nft_collection_id: &T::NftCollectionId, nft_id: &T::NftId, ) -> DispatchResult { - let symbol = T::NewAssetSymbol::get(); - let name = T::NewAssetName::get(); - let name = format!("{} {nft_collection_id}-{nft_id}", String::from_utf8_lossy(&name)); - let deposit = T::Assets::calc(&name.clone().into(), &symbol.clone().into()); + let name = format!("{} {nft_collection_id}-{nft_id}", String::from_utf8_lossy(&T::NewAssetName::get())); + let symbol: &[u8] = &T::NewAssetSymbol::get(); + let deposit = T::Assets::calc(name.as_bytes(), 
symbol); + + if deposit != Zero::zero() { T::Currency::transfer( &depositor, diff --git a/frame/support/src/traits/tokens/fungibles/metadata.rs b/frame/support/src/traits/tokens/fungibles/metadata.rs index eb3d64644dc8c..c62d7a901c78d 100644 --- a/frame/support/src/traits/tokens/fungibles/metadata.rs +++ b/frame/support/src/traits/tokens/fungibles/metadata.rs @@ -42,5 +42,5 @@ pub trait Mutate: Inspect { pub trait CalcMetadataDeposit { // Returns the required deposit amount for a given metadata. - fn calc(name: &Vec, symbol: &Vec) -> DepositBalance; + fn calc(name: &[u8], symbol: &[u8]) -> DepositBalance; } From 50212e31684beb5b396e991e080a8fa0d6bba669 Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Tue, 28 Mar 2023 16:21:52 +0200 Subject: [PATCH 087/101] cargo fmt --- frame/assets/src/functions.rs | 5 +---- frame/nft-fractionalization/src/lib.rs | 8 +++++--- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs index 7a1ad409afd1f..14ba651500ecd 100644 --- a/frame/assets/src/functions.rs +++ b/frame/assets/src/functions.rs @@ -924,10 +924,7 @@ impl, I: 'static> Pallet { } /// Calculate the metadata deposit for the provided data. - pub(super) fn calc_metadata_deposit( - name: &[u8], - symbol: &[u8], - ) -> DepositBalanceOf { + pub(super) fn calc_metadata_deposit(name: &[u8], symbol: &[u8]) -> DepositBalanceOf { T::MetadataDepositPerByte::get() .saturating_mul(((name.len() + symbol.len()) as u32).into()) .saturating_add(T::MetadataDepositBase::get()) diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index cea7770f0db72..3256a32eb126c 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -182,7 +182,7 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// Asset ID does not correspond to locked NFT. + /// Asset ID does not correspond to locked NFT. 
IncorrectAssetId, /// The signing account has no permission to do the operation. NoPermission, @@ -358,11 +358,13 @@ pub mod pallet { nft_collection_id: &T::NftCollectionId, nft_id: &T::NftId, ) -> DispatchResult { - let name = format!("{} {nft_collection_id}-{nft_id}", String::from_utf8_lossy(&T::NewAssetName::get())); + let name = format!( + "{} {nft_collection_id}-{nft_id}", + String::from_utf8_lossy(&T::NewAssetName::get()) + ); let symbol: &[u8] = &T::NewAssetSymbol::get(); let deposit = T::Assets::calc(name.as_bytes(), symbol); - if deposit != Zero::zero() { T::Currency::transfer( &depositor, From 14bb74d2c0fc24a740523ec7b8afb35a71cbdd27 Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Tue, 28 Mar 2023 16:40:50 +0200 Subject: [PATCH 088/101] Add README about unlocking NFTs --- frame/nft-fractionalization/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/frame/nft-fractionalization/README.md b/frame/nft-fractionalization/README.md index ab073777915fe..9f0553f855332 100644 --- a/frame/nft-fractionalization/README.md +++ b/frame/nft-fractionalization/README.md @@ -1,3 +1,6 @@ ### Lock NFT Lock an NFT from `pallet-nfts` and mint fungible assets from `pallet-assets`. + +The NFT gets locked by putting a system-level attribute named `Locked`. This prevents the NFT from being transferred further. +The NFT becomes unlocked when the `Locked` attribute is removed. In order to unify the fungible asset and unlock the NFT, an account must hold the full balance of the asset the NFT was fractionalised into. Holding less of the fungible asset will not allow to unlock the NFT. 
From 9aa89e3c52175aea6554f9ac07863fe0d80ca155 Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Mon, 17 Apr 2023 12:01:43 +0200 Subject: [PATCH 089/101] add constant definition --- frame/nft-fractionalization/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index 3256a32eb126c..87bc8dc339c8d 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -99,6 +99,8 @@ pub mod pallet { /// The currency mechanism, used for paying for deposits. type Currency: ReservableCurrency; + /// The deposit paid by the user locking an NFT. The deposit is returned to the original NFT + /// owner when the asset is unified and the NFT is unlocked. #[pallet::constant] type Deposit: Get>; From d539da03fb9372783b5891861bcafa9d9b991bc4 Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Mon, 17 Apr 2023 12:49:36 +0200 Subject: [PATCH 090/101] add fortitude & precision to asset related functions --- bin/node/runtime/src/lib.rs | 2 +- frame/nft-fractionalization/src/lib.rs | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 51b2c2d09cdf0..40eaac137c9ba 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -36,7 +36,7 @@ use frame_support::{ tokens::{nonfungibles_v2::Inspect, GetSalary, PayFromAccount}, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, - LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, + LockIdentifier, Locker, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, }, weights::{ constants::{ diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index 87bc8dc339c8d..e0433a4d7d0a4 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ 
b/frame/nft-fractionalization/src/lib.rs @@ -73,6 +73,8 @@ pub mod pallet { tokens::{ nonfungibles_v2::{Inspect as NonFungiblesInspect, Transfer}, AssetId, Balance as AssetBalance, + Fortitude::Polite, + Precision::Exact, }, Currency, ExistenceRequirement, ReservableCurrency, }, @@ -339,7 +341,8 @@ pub mod pallet { beneficiary: &T::AccountId, amount: AssetBalanceOf, ) -> DispatchResult { - T::Assets::mint_into(asset_id, beneficiary, amount) + T::Assets::mint_into(asset_id, beneficiary, amount)?; + Ok(()) } /// Burn tokens from the account. @@ -348,7 +351,7 @@ pub mod pallet { account: &T::AccountId, amount: AssetBalanceOf, ) -> DispatchResult { - T::Assets::burn_from(asset_id, account, amount)?; + T::Assets::burn_from(asset_id, account, amount, Exact, Polite)?; T::Assets::start_destroy(asset_id, None) } From ab7ad4a689fe6a6d5dc5fe28c59cc776b37010ad Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Mon, 17 Apr 2023 18:58:23 +0200 Subject: [PATCH 091/101] fix mock and tests --- frame/nft-fractionalization/src/mock.rs | 12 +++++++++--- frame/nft-fractionalization/src/tests.rs | 4 ++-- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/frame/nft-fractionalization/src/mock.rs b/frame/nft-fractionalization/src/mock.rs index 1e01c1098c242..9d40664ff9fc8 100644 --- a/frame/nft-fractionalization/src/mock.rs +++ b/frame/nft-fractionalization/src/mock.rs @@ -23,7 +23,7 @@ use crate as pallet_nft_fractionalization; use frame_support::{ construct_runtime, parameter_types, traits::{tokens::nonfungibles_v2::Inspect, AsEnsureOriginWithArg, ConstU32, ConstU64, Locker}, - PalletId, + BoundedVec, PalletId, }; use frame_system::EnsureSigned; use pallet_nfts::{PalletFeatures, LOCKED_NFT_KEY}; @@ -93,6 +93,10 @@ impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; + type HoldIdentifier = (); + type MaxHolds = (); + type FreezeIdentifier = (); + type MaxFreezes = (); } impl pallet_assets::Config for 
Test { @@ -161,9 +165,10 @@ impl pallet_nfts::Config for Test { } parameter_types! { + pub const StringLimit: u32 = 50; pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); - pub NewAssetSymbol: Vec = (*b"FRAC").into(); - pub NewAssetName: Vec = (*b"Frac").into(); + pub NewAssetSymbol: BoundedVec = (*b"FRAC").to_vec().try_into().unwrap(); + pub NewAssetName: BoundedVec = (*b"Frac").to_vec().try_into().unwrap(); } impl Config for Test { @@ -180,6 +185,7 @@ impl Config for Test { type Nfts = Nfts; type PalletId = NftFractionalizationPalletId; type WeightInfo = (); + type StringLimit = StringLimit; } // Build genesis storage according to the mock runtime. diff --git a/frame/nft-fractionalization/src/tests.rs b/frame/nft-fractionalization/src/tests.rs index 98a7c6271191b..f2219f550fb24 100644 --- a/frame/nft-fractionalization/src/tests.rs +++ b/frame/nft-fractionalization/src/tests.rs @@ -196,7 +196,7 @@ fn unify_should_work() { asset_id, account(1), ), - Error::::DataNotFound + Error::::NftNotFound ); assert_noop!( NftFractionalization::unify( @@ -206,7 +206,7 @@ fn unify_should_work() { asset_id + 1, account(1), ), - Error::::DataNotFound + Error::::IncorrectAssetId ); // can't unify the asset a user doesn't hold From 200ff452192adf653fc21f168776d1d60f28af91 Mon Sep 17 00:00:00 2001 From: lana-shanghai Date: Wed, 19 Apr 2023 13:50:29 +0200 Subject: [PATCH 092/101] transfer ExistentialDeposit to pallet if it's balance is below --- bin/node/runtime/src/lib.rs | 1 + frame/nft-fractionalization/src/lib.rs | 14 +++++++++++++- frame/nft-fractionalization/src/mock.rs | 1 + frame/nft-fractionalization/src/tests.rs | 16 ++++------------ 4 files changed, 19 insertions(+), 13 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 40eaac137c9ba..3de4036584f67 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1653,6 +1653,7 @@ parameter_types! 
{ impl pallet_nft_fractionalization::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Deposit = AssetDeposit; + type ExistentialDeposit = ExistentialDeposit; type Currency = Balances; type NewAssetSymbol = NewAssetSymbol; type NewAssetName = NewAssetName; diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index e0433a4d7d0a4..945627e4f382c 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -106,6 +106,9 @@ pub mod pallet { #[pallet::constant] type Deposit: Get>; + #[pallet::constant] + type ExistentialDeposit: Get>; + /// Identifier for the collection of NFT. type NftCollectionId: Member + Parameter + MaxEncodedLen + Copy + Display; @@ -368,8 +371,17 @@ pub mod pallet { String::from_utf8_lossy(&T::NewAssetName::get()) ); let symbol: &[u8] = &T::NewAssetSymbol::get(); + let existential_deposit = T::ExistentialDeposit::get(); + let pallet_account_balance = T::Currency::free_balance(&pallet_account); + if pallet_account_balance < existential_deposit { + T::Currency::transfer( + &depositor, + &pallet_account, + existential_deposit, + ExistenceRequirement::KeepAlive, + )?; + } let deposit = T::Assets::calc(name.as_bytes(), symbol); - if deposit != Zero::zero() { T::Currency::transfer( &depositor, diff --git a/frame/nft-fractionalization/src/mock.rs b/frame/nft-fractionalization/src/mock.rs index 9d40664ff9fc8..326eb459cb84f 100644 --- a/frame/nft-fractionalization/src/mock.rs +++ b/frame/nft-fractionalization/src/mock.rs @@ -174,6 +174,7 @@ parameter_types! 
{ impl Config for Test { type RuntimeEvent = RuntimeEvent; type Deposit = ConstU64<1>; + type ExistentialDeposit = ConstU64<1>; type Currency = Balances; type NewAssetSymbol = NewAssetSymbol; type NewAssetName = NewAssetName; diff --git a/frame/nft-fractionalization/src/tests.rs b/frame/nft-fractionalization/src/tests.rs index f2219f550fb24..33e2722411d40 100644 --- a/frame/nft-fractionalization/src/tests.rs +++ b/frame/nft-fractionalization/src/tests.rs @@ -26,7 +26,7 @@ use frame_support::{ }, }; use pallet_nfts::CollectionConfig; -use sp_runtime::{DispatchError, ModuleError}; +use sp_runtime::{DispatchError, ModuleError, TokenError::FundsUnavailable}; fn assets() -> Vec { let mut s: Vec<_> = <::Assets>::asset_ids().collect(); @@ -196,7 +196,7 @@ fn unify_should_work() { asset_id, account(1), ), - Error::::NftNotFound + Error::::NftNotFractionalized ); assert_noop!( NftFractionalization::unify( @@ -218,11 +218,7 @@ fn unify_should_work() { asset_id, account(1), ), - DispatchError::Module(ModuleError { - index: 2, - error: [1, 0, 0, 0], - message: Some("NoAccount") - }) + DispatchError::Token(FundsUnavailable) ); assert_ok!(NftFractionalization::unify( @@ -266,11 +262,7 @@ fn unify_should_work() { asset_id, account(1), ), - DispatchError::Module(ModuleError { - index: 2, - error: [0, 0, 0, 0], - message: Some("BalanceLow") - }) + DispatchError::Token(FundsUnavailable) ); assert_ok!(Assets::transfer(RuntimeOrigin::signed(account(2)), asset_id, account(1), 1)); From af4725d58c8332a1498a817268db80187af9960c Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Wed, 17 May 2023 17:34:58 +0200 Subject: [PATCH 093/101] Refactoring --- bin/node/runtime/src/lib.rs | 64 ++++++++------- frame/assets/src/impl_fungibles.rs | 4 +- .../nft-fractionalization/src/benchmarking.rs | 45 +++++----- frame/nft-fractionalization/src/lib.rs | 82 +++++++++---------- frame/nft-fractionalization/src/mock.rs | 28 +++++-- frame/nft-fractionalization/src/tests.rs | 12 +-- 
frame/nft-fractionalization/src/types.rs | 39 +++++++++ frame/nft-fractionalization/src/weights.rs | 24 +++--- frame/nfts/src/impl_nonfungibles.rs | 7 +- .../src/traits/tokens/fungible/regular.rs | 3 +- .../src/traits/tokens/fungibles/metadata.rs | 4 +- .../src/traits/tokens/nonfungible_v2.rs | 23 +++++- .../src/traits/tokens/nonfungibles_v2.rs | 10 ++- 13 files changed, 217 insertions(+), 128 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 82f780c37f15d..c322f5566f612 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -33,7 +33,10 @@ use frame_support::{ parameter_types, traits::{ fungible::ItemOf, - tokens::{nonfungibles_v2::Inspect, GetSalary, PayFromAccount}, + tokens::{ + nonfungibles_v2::{Inspect, LockableNonfungible}, + GetSalary, PayFromAccount, + }, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, LockIdentifier, Locker, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, @@ -54,7 +57,7 @@ pub use node_primitives::{AccountId, Signature}; use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; use pallet_election_provider_multi_phase::SolutionAccuracyOf; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; -use pallet_nfts::{PalletFeatures, LOCKED_NFT_KEY}; +use pallet_nfts::PalletFeatures; use pallet_nis::WithMaximumOf; use pallet_session::historical as pallet_session_historical; pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdjustment}; @@ -445,6 +448,8 @@ parameter_types! { pub enum HoldReason { /// The NIS Pallet has reserved it for a non-fungible receipt. Nis, + /// Used by the NFT Fractionalization Pallet. 
+ NftFractionalization, } impl pallet_balances::Config for Runtime { @@ -460,7 +465,7 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = (); type MaxFreezes = (); type HoldIdentifier = HoldReason; - type MaxHolds = ConstU32<1>; + type MaxHolds = ConstU32<2>; } parameter_types! { @@ -1612,6 +1617,33 @@ impl pallet_core_fellowship::Config for Runtime { type EvidenceSize = ConstU32<16_384>; } +parameter_types! { + pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); + pub NewAssetSymbol: BoundedVec = (*b"FRAC").to_vec().try_into().unwrap(); + pub NewAssetName: BoundedVec = (*b"Frac").to_vec().try_into().unwrap(); + pub const NftFractionalizationHoldReason: HoldReason = HoldReason::NftFractionalization; +} + +impl pallet_nft_fractionalization::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Deposit = AssetDeposit; + type Currency = Balances; + type NewAssetSymbol = NewAssetSymbol; + type NewAssetName = NewAssetName; + type StringLimit = StringLimit; + type NftCollectionId = ::CollectionId; + type NftId = ::ItemId; + type AssetBalance = ::Balance; + type AssetId = ::AssetId; + type Assets = Assets; + type Nfts = Nfts; + type PalletId = NftFractionalizationPalletId; + type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; + type HoldReason = NftFractionalizationHoldReason; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + parameter_types! { pub Features: PalletFeatures = PalletFeatures::all_enabled(); pub const MaxAttributesPerCall: u32 = 10; @@ -1623,7 +1655,7 @@ type CollectionId = ::CollectionId; pub struct NftLocker; impl Locker for NftLocker { fn is_locked(collection: CollectionId, item: ItemId) -> bool { - >::system_attribute(&collection, &item, LOCKED_NFT_KEY).is_some() + Nfts::is_locked(&collection, &item) } } @@ -1656,30 +1688,6 @@ impl pallet_nfts::Config for Runtime { type Locker = NftLocker; } -parameter_types! 
{ - pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); - pub NewAssetSymbol: BoundedVec = (*b"FRAC").to_vec().try_into().unwrap(); - pub NewAssetName: BoundedVec = (*b"Frac").to_vec().try_into().unwrap(); -} - -impl pallet_nft_fractionalization::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Deposit = AssetDeposit; - type ExistentialDeposit = ExistentialDeposit; - type Currency = Balances; - type NewAssetSymbol = NewAssetSymbol; - type NewAssetName = NewAssetName; - type StringLimit = StringLimit; - type NftCollectionId = ::CollectionId; - type NftId = ::ItemId; - type AssetBalance = ::Balance; - type AssetId = ::AssetId; - type Assets = Assets; - type Nfts = Nfts; - type PalletId = NftFractionalizationPalletId; - type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; -} - impl pallet_transaction_storage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs index f47b04037fe65..a7df3d154ca8e 100644 --- a/frame/assets/src/impl_fungibles.rs +++ b/frame/assets/src/impl_fungibles.rs @@ -227,11 +227,11 @@ impl, I: 'static> fungibles::metadata::Mutate<:: } impl, I: 'static> - fungibles::metadata::CalcMetadataDeposit< + fungibles::metadata::MetadataDeposit< ::AccountId>>::Balance, > for Pallet { - fn calc( + fn calc_metadata_deposit( name: &[u8], symbol: &[u8], ) -> ::AccountId>>::Balance { diff --git a/frame/nft-fractionalization/src/benchmarking.rs b/frame/nft-fractionalization/src/benchmarking.rs index e9137b469a91a..7db497ba7244e 100644 --- a/frame/nft-fractionalization/src/benchmarking.rs +++ b/frame/nft-fractionalization/src/benchmarking.rs @@ -57,16 +57,16 @@ where fn mint_nft(nft_id: T::NftId) -> (T::AccountId, AccountIdLookupOf) where - T::NftCollectionId: From, - T::Currency: Unbalanced, T::Nfts: Create, T::BlockNumber, T::NftCollectionId>> + Mutate, { let caller: T::AccountId = 
whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - assert_ok!(T::Currency::set_balance(&caller, BalanceOf::::max_value())); + T::Currency::set_total_issuance(BalanceOf::::max_value()); + assert_ok!(T::Currency::write_balance(&caller, BalanceOf::::max_value())); assert_ok!(T::Nfts::create_collection(&caller, &caller, &default_collection_config::())); - assert_ok!(T::Nfts::mint_into(&0.into(), &nft_id, &caller, &ItemConfig::default(), true)); + let collection = T::BenchmarkHelper::collection(0); + assert_ok!(T::Nfts::mint_into(&collection, &nft_id, &caller, &ItemConfig::default(), true)); (caller, caller_lookup) } @@ -81,47 +81,48 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { benchmarks! { where_clause { where - T::AssetBalance: From, - T::AssetId: From, - T::NftCollectionId: From, - T::NftId: From, - T::Currency: Unbalanced, T::Nfts: Create, T::BlockNumber, T::NftCollectionId>> + Mutate, } fractionalize { - let (caller, caller_lookup) = mint_nft::(0.into()); - }: _(SystemOrigin::Signed(caller.clone()), 0.into(), 0.into(), 0.into(), caller_lookup, 1000u32.into()) + let asset = T::BenchmarkHelper::asset(0); + let collection = T::BenchmarkHelper::collection(0); + let nft = T::BenchmarkHelper::nft(0); + let (caller, caller_lookup) = mint_nft::(nft); + }: _(SystemOrigin::Signed(caller.clone()), collection, nft, asset, caller_lookup, 1000u32.into()) verify { assert_last_event::( Event::NftFractionalized { - nft_collection: 0.into(), - nft: 0.into(), + nft_collection: collection, + nft, fractions: 1000u32.into(), - asset: 0.into(), + asset, beneficiary: caller, }.into() ); } unify { - let (caller, caller_lookup) = mint_nft::(0.into()); + let asset = T::BenchmarkHelper::asset(0); + let collection = T::BenchmarkHelper::collection(0); + let nft = T::BenchmarkHelper::nft(0); + let (caller, caller_lookup) = mint_nft::(nft); NftFractionalization::::fractionalize( SystemOrigin::Signed(caller.clone()).into(), - 0.into(), - 0.into(), - 
0.into(), + collection, + nft, + asset, caller_lookup.clone(), 1000u32.into(), )?; - }: _(SystemOrigin::Signed(caller.clone()), 0.into(), 0.into(), 0.into(), caller_lookup) + }: _(SystemOrigin::Signed(caller.clone()), collection, nft, asset, caller_lookup) verify { assert_last_event::( Event::NftUnified { - nft_collection: 0.into(), - nft: 0.into(), - asset: 0.into(), + nft_collection: collection, + nft, + asset, beneficiary: caller, }.into() ); diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index 945627e4f382c..81e063c959d21 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -56,42 +56,37 @@ pub use weights::WeightInfo; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_system::pallet_prelude::*; - use scale_info::prelude::{format, string::String}; - use sp_runtime::traits::{One, Zero}; - use sp_std::{fmt::Display, prelude::*}; - use frame_support::{ dispatch::DispatchResult, + ensure, pallet_prelude::*, sp_runtime::traits::{AccountIdConversion, StaticLookup}, traits::{ + fungible::{ + hold::{Inspect as HoldInspectFungible, Mutate as HoldMutateFungible}, + Inspect as InspectFungible, Mutate as MutateFungible, + }, fungibles::{ - metadata::{CalcMetadataDeposit, Mutate as MutateMetadata}, + metadata::{MetadataDeposit, Mutate as MutateMetadata}, Create, Destroy, Inspect, Mutate, }, tokens::{ - nonfungibles_v2::{Inspect as NonFungiblesInspect, Transfer}, + nonfungibles_v2::{Inspect as NonFungiblesInspect, LockableNonfungible, Transfer}, AssetId, Balance as AssetBalance, Fortitude::Polite, - Precision::Exact, + Precision::{BestEffort, Exact}, + Preservation::Preserve, }, - Currency, ExistenceRequirement, ReservableCurrency, }, BoundedVec, PalletId, }; - - pub type AssetIdOf = - <::Assets as Inspect<::AccountId>>::AssetId; - pub type AssetBalanceOf = - <::Assets as Inspect<::AccountId>>::Balance; - pub type DepositOf = - <::Currency as 
Currency<::AccountId>>::Balance; - pub type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + use frame_system::pallet_prelude::*; + use scale_info::prelude::{format, string::String}; + use sp_runtime::traits::{One, Zero}; + use sp_std::{fmt::Display, prelude::*}; #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(_); + pub struct Pallet(PhantomData); #[pallet::config] pub trait Config: frame_system::Config { @@ -99,16 +94,19 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The currency mechanism, used for paying for deposits. - type Currency: ReservableCurrency; + type Currency: InspectFungible + + MutateFungible + + HoldInspectFungible + + HoldMutateFungible; + + #[pallet::constant] + type HoldReason: Get<>::Reason>; /// The deposit paid by the user locking an NFT. The deposit is returned to the original NFT /// owner when the asset is unified and the NFT is unlocked. #[pallet::constant] type Deposit: Get>; - #[pallet::constant] - type ExistentialDeposit: Get>; - /// Identifier for the collection of NFT. type NftCollectionId: Member + Parameter + MaxEncodedLen + Copy + Display; @@ -127,14 +125,15 @@ pub mod pallet { + Destroy + Mutate + MutateMetadata - + CalcMetadataDeposit<>::Balance>; + + MetadataDeposit>; /// Registry for minted NFTs. type Nfts: NonFungiblesInspect< Self::AccountId, ItemId = Self::NftId, CollectionId = Self::NftCollectionId, - > + Transfer; + > + Transfer + + LockableNonfungible; /// The pallet's id, used for deriving its sovereign account ID. #[pallet::constant] @@ -152,6 +151,10 @@ pub mod pallet { #[pallet::constant] type StringLimit: Get; + /// A set of helper functions for benchmarking. + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper: BenchmarkHelper; + /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; } @@ -237,7 +240,7 @@ pub mod pallet { let pallet_account = Self::get_pallet_account(); let deposit = T::Deposit::get(); - T::Currency::reserve(&nft_owner, deposit)?; + T::Currency::hold(&T::HoldReason::get(), &nft_owner, deposit)?; Self::do_lock_nft(nft_collection_id, nft_id)?; Self::do_create_asset(asset_id, pallet_account.clone())?; Self::do_mint_asset(asset_id, &beneficiary, fractions)?; @@ -295,7 +298,7 @@ pub mod pallet { let asset_creator = details.asset_creator; Self::do_burn_asset(asset_id, &who, details.fractions)?; Self::do_unlock_nft(nft_collection_id, nft_id, &beneficiary)?; - T::Currency::unreserve(&asset_creator, deposit); + T::Currency::release(&T::HoldReason::get(), &asset_creator, deposit, BestEffort)?; Self::deposit_event(Event::NftUnified { nft_collection: nft_collection_id, @@ -371,24 +374,15 @@ pub mod pallet { String::from_utf8_lossy(&T::NewAssetName::get()) ); let symbol: &[u8] = &T::NewAssetSymbol::get(); - let existential_deposit = T::ExistentialDeposit::get(); - let pallet_account_balance = T::Currency::free_balance(&pallet_account); + let existential_deposit = T::Currency::minimum_balance(); + let pallet_account_balance = T::Currency::balance(&pallet_account); + if pallet_account_balance < existential_deposit { - T::Currency::transfer( - &depositor, - &pallet_account, - existential_deposit, - ExistenceRequirement::KeepAlive, - )?; - } - let deposit = T::Assets::calc(name.as_bytes(), symbol); - if deposit != Zero::zero() { - T::Currency::transfer( - &depositor, - &pallet_account, - deposit, - ExistenceRequirement::KeepAlive, - )?; + T::Currency::transfer(&depositor, &pallet_account, existential_deposit, Preserve)?; + } + let metadata_deposit = T::Assets::calc_metadata_deposit(name.as_bytes(), symbol); + if !metadata_deposit.is_zero() { + T::Currency::transfer(&depositor, &pallet_account, metadata_deposit, Preserve)?; } T::Assets::set(asset_id, &pallet_account, name.into(), symbol.into(), 0) } diff --git 
a/frame/nft-fractionalization/src/mock.rs b/frame/nft-fractionalization/src/mock.rs index 326eb459cb84f..59ae3c846db7d 100644 --- a/frame/nft-fractionalization/src/mock.rs +++ b/frame/nft-fractionalization/src/mock.rs @@ -20,13 +20,18 @@ use super::*; use crate as pallet_nft_fractionalization; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ construct_runtime, parameter_types, - traits::{tokens::nonfungibles_v2::Inspect, AsEnsureOriginWithArg, ConstU32, ConstU64, Locker}, + traits::{ + tokens::nonfungibles_v2::LockableNonfungible, AsEnsureOriginWithArg, ConstU32, ConstU64, + Locker, + }, BoundedVec, PalletId, }; use frame_system::EnsureSigned; -use pallet_nfts::{PalletFeatures, LOCKED_NFT_KEY}; +use pallet_nfts::PalletFeatures; +use scale_info::TypeInfo; use sp_core::H256; use sp_runtime::{ testing::Header, @@ -83,6 +88,13 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<16>; } +#[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, Debug, TypeInfo, +)] +pub enum HoldIdentifier { + NftFractionalization, +} + impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); @@ -93,8 +105,8 @@ impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; - type HoldIdentifier = (); - type MaxHolds = (); + type HoldIdentifier = HoldIdentifier; + type MaxHolds = ConstU32<1>; type FreezeIdentifier = (); type MaxFreezes = (); } @@ -127,10 +139,11 @@ parameter_types! { pub storage Features: PalletFeatures = PalletFeatures::all_enabled(); } +// enables the NFTs locker pub struct TestLocker; impl Locker for TestLocker { fn is_locked(collection: CollectionId, item: ItemId) -> bool { - >::system_attribute(&collection, &item, LOCKED_NFT_KEY).is_some() + Nfts::is_locked(&collection, &item) } } @@ -169,12 +182,12 @@ parameter_types! 
{ pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); pub NewAssetSymbol: BoundedVec = (*b"FRAC").to_vec().try_into().unwrap(); pub NewAssetName: BoundedVec = (*b"Frac").to_vec().try_into().unwrap(); + pub const HoldReason: HoldIdentifier = HoldIdentifier::NftFractionalization; } impl Config for Test { type RuntimeEvent = RuntimeEvent; type Deposit = ConstU64<1>; - type ExistentialDeposit = ConstU64<1>; type Currency = Balances; type NewAssetSymbol = NewAssetSymbol; type NewAssetName = NewAssetName; @@ -187,6 +200,9 @@ impl Config for Test { type PalletId = NftFractionalizationPalletId; type WeightInfo = (); type StringLimit = StringLimit; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); + type HoldReason = HoldReason; } // Build genesis storage according to the mock runtime. diff --git a/frame/nft-fractionalization/src/tests.rs b/frame/nft-fractionalization/src/tests.rs index 33e2722411d40..8564b80533e0d 100644 --- a/frame/nft-fractionalization/src/tests.rs +++ b/frame/nft-fractionalization/src/tests.rs @@ -21,8 +21,8 @@ use crate::{mock::*, *}; use frame_support::{ assert_noop, assert_ok, traits::{ + fungible::{hold::Inspect as InspectHold, Mutate as MutateFungible}, fungibles::{metadata::Inspect, InspectEnumerable}, - Currency, }, }; use pallet_nfts::CollectionConfig; @@ -66,8 +66,8 @@ fn fractionalize_should_work() { let asset_id = 0; let fractions = 1000; - Balances::make_free_balance_be(&account(1), 100); - Balances::make_free_balance_be(&account(2), 100); + Balances::set_balance(&account(1), 100); + Balances::set_balance(&account(2), 100); assert_ok!(Nfts::force_create( RuntimeOrigin::root(), @@ -92,7 +92,7 @@ fn fractionalize_should_work() { )); assert_eq!(assets(), vec![asset_id]); assert_eq!(Assets::balance(asset_id, account(2)), fractions); - assert_eq!(Balances::reserved_balance(&account(1)), 2); + assert_eq!(Balances::total_balance_on_hold(&account(1)), 2); 
assert_eq!(String::from_utf8(Assets::name(0)).unwrap(), "Frac 0-0"); assert_eq!(String::from_utf8(Assets::symbol(0)).unwrap(), "FRAC"); assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(account(1))); @@ -164,8 +164,8 @@ fn unify_should_work() { let asset_id = 0; let fractions = 1000; - Balances::make_free_balance_be(&account(1), 100); - Balances::make_free_balance_be(&account(2), 100); + Balances::set_balance(&account(1), 100); + Balances::set_balance(&account(2), 100); assert_ok!(Nfts::force_create( RuntimeOrigin::root(), diff --git a/frame/nft-fractionalization/src/types.rs b/frame/nft-fractionalization/src/types.rs index 2e9b338ac57bc..cbaaf5f5160d3 100644 --- a/frame/nft-fractionalization/src/types.rs +++ b/frame/nft-fractionalization/src/types.rs @@ -17,8 +17,18 @@ //! Various basic types for use in the Nft fractionalization pallet. +use super::*; use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::traits::{fungible::Inspect as FunInspect, fungibles::Inspect}; use scale_info::TypeInfo; +use sp_runtime::traits::StaticLookup; + +pub type AssetIdOf = <::Assets as Inspect<::AccountId>>::AssetId; +pub type AssetBalanceOf = + <::Assets as Inspect<::AccountId>>::Balance; +pub type DepositOf = + <::Currency as FunInspect<::AccountId>>::Balance; +pub type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// Stores the details of a fractionalized item. #[derive(Decode, Encode, Default, PartialEq, Eq, MaxEncodedLen, TypeInfo)] @@ -35,3 +45,32 @@ pub struct Details { /// Account that fractionalized an item. pub asset_creator: AccountId, } + +/// Benchmark Helper +#[cfg(feature = "runtime-benchmarks")] +pub trait BenchmarkHelper { + /// Returns an asset id from a given integer. + fn asset(id: u32) -> AssetId; + /// Returns a collection id from a given integer. + fn collection(id: u32) -> CollectionId; + /// Returns an nft id from a given integer. 
+ fn nft(id: u32) -> ItemId; +} + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelper for () +where + AssetId: From, + CollectionId: From, + ItemId: From, +{ + fn asset(id: u32) -> AssetId { + id.into() + } + fn collection(id: u32) -> CollectionId { + id.into() + } + fn nft(id: u32) -> ItemId { + id.into() + } +} diff --git a/frame/nft-fractionalization/src/weights.rs b/frame/nft-fractionalization/src/weights.rs index 59d086dcd95dd..d976e484ddc47 100644 --- a/frame/nft-fractionalization/src/weights.rs +++ b/frame/nft-fractionalization/src/weights.rs @@ -57,15 +57,15 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn fractionalize() -> Weight { // Minimum execution time: 44_312 nanoseconds. - Weight::from_ref_time(44_871_000) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(5)) + Weight::from_parts(25_147_000, 3549) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } fn unify() -> Weight { // Minimum execution time: 31_654 nanoseconds. - Weight::from_ref_time(32_078_000) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(5)) + Weight::from_parts(25_147_000, 3549) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } } @@ -73,14 +73,14 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { fn fractionalize() -> Weight { // Minimum execution time: 44_312 nanoseconds. - Weight::from_ref_time(44_871_000) - .saturating_add(RocksDbWeight::get().reads(2)) - .saturating_add(RocksDbWeight::get().writes(5)) + Weight::from_parts(25_147_000, 3549) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } fn unify() -> Weight { // Minimum execution time: 31_654 nanoseconds. 
- Weight::from_ref_time(32_078_000) - .saturating_add(RocksDbWeight::get().reads(2)) - .saturating_add(RocksDbWeight::get().writes(5)) + Weight::from_parts(25_147_000, 3549) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } } diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index 8269825c64b7b..d64b347c3cb3b 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -322,7 +322,13 @@ impl, I: 'static> Transfer for Pallet { ) -> DispatchResult { Self::do_transfer(*collection, *item, destination.clone(), |_, _| Ok(())) } +} +impl, I: 'static> LockableNonfungible for Pallet { + fn is_locked(collection: &Self::CollectionId, item: &Self::ItemId) -> bool { + >::system_attribute(&collection, &item, LOCKED_NFT_KEY) + .is_some() + } fn lock(collection: &Self::CollectionId, item: &Self::ItemId) -> DispatchResult { >::set_attribute( collection, @@ -331,7 +337,6 @@ impl, I: 'static> Transfer for Pallet { &[1], ) } - fn unlock(collection: &Self::CollectionId, item: &Self::ItemId) -> DispatchResult { >::clear_attribute( collection, diff --git a/frame/support/src/traits/tokens/fungible/regular.rs b/frame/support/src/traits/tokens/fungible/regular.rs index 3476549464032..c0658ad71d2b3 100644 --- a/frame/support/src/traits/tokens/fungible/regular.rs +++ b/frame/support/src/traits/tokens/fungible/regular.rs @@ -58,7 +58,8 @@ pub trait Inspect: Sized { /// The minimum balance any single account may have. fn minimum_balance() -> Self::Balance; - /// Get the total amount of funds whose ultimate bneficial ownership can be determined as `who`. + /// Get the total amount of funds whose ultimate beneficial ownership can be determined as + /// `who`. /// /// This may include funds which are wholly inaccessible to `who`, either temporarily or even /// indefinitely. 
diff --git a/frame/support/src/traits/tokens/fungibles/metadata.rs b/frame/support/src/traits/tokens/fungibles/metadata.rs index c62d7a901c78d..ab310119e5846 100644 --- a/frame/support/src/traits/tokens/fungibles/metadata.rs +++ b/frame/support/src/traits/tokens/fungibles/metadata.rs @@ -40,7 +40,7 @@ pub trait Mutate: Inspect { ) -> DispatchResult; } -pub trait CalcMetadataDeposit { +pub trait MetadataDeposit { // Returns the required deposit amount for a given metadata. - fn calc(name: &[u8], symbol: &[u8]) -> DepositBalance; + fn calc_metadata_deposit(name: &[u8], symbol: &[u8]) -> DepositBalance; } diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs index 0c2e43808ce4b..7fe347015b1b4 100644 --- a/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -173,10 +173,16 @@ pub trait Mutate: Inspect { } } -/// Trait for transferring and locking/unlocking non-fungible sets of items. +/// Trait for transferring non-fungible sets of items. pub trait Transfer: Inspect { /// Transfer `item` into `destination` account. fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult; +} + +/// Trait for locking/unlocking non-fungible sets of items. +pub trait LockableNonfungible: Inspect { + /// Returns `true` if the `item`is locked. + fn is_locked(item: &Self::ItemId) -> bool; /// Disable the `item` transfer. fn lock(item: &Self::ItemId) -> DispatchResult; /// Re-enable the `item` transfer. 
@@ -316,10 +322,21 @@ impl< fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult { >::transfer(&A::get(), item, destination) } +} + +impl< + F: nonfungibles::LockableNonfungible, + A: Get<>::CollectionId>, + AccountId, + > LockableNonfungible for ItemOf +{ + fn is_locked(item: &Self::ItemId) -> bool { + >::is_locked(&A::get(), item) + } fn lock(item: &Self::ItemId) -> DispatchResult { - >::lock(&A::get(), item) + >::lock(&A::get(), item) } fn unlock(item: &Self::ItemId) -> DispatchResult { - >::unlock(&A::get(), item) + >::unlock(&A::get(), item) } } diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs index 40ec28c5198bb..e064b898ae6a7 100644 --- a/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -330,7 +330,7 @@ pub trait Mutate: Inspect { } } -/// Trait for transferring and locking/unlocking non-fungible sets of items. +/// Trait for transferring non-fungible sets of items. pub trait Transfer: Inspect { /// Transfer `item` of `collection` into `destination` account. fn transfer( @@ -338,6 +338,14 @@ pub trait Transfer: Inspect { item: &Self::ItemId, destination: &AccountId, ) -> DispatchResult; +} + +/// Trait for locking/unlocking non-fungible sets of items. +pub trait LockableNonfungible: Inspect { + /// Disable the `item` of `collection` transfer. + fn is_locked(_collection: &Self::CollectionId, _item: &Self::ItemId) -> bool { + false + } /// Disable the `item` of `collection` transfer. 
/// From 0fa54b0ac2ed4ecf94ceb2f61f70f7ba517953ff Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Wed, 17 May 2023 18:33:35 +0200 Subject: [PATCH 094/101] Simplify the locking mechanism --- bin/node/runtime/src/lib.rs | 19 ++------- frame/nft-fractionalization/src/lib.rs | 9 ++--- frame/nft-fractionalization/src/mock.rs | 17 +------- frame/nfts/src/features/transfer.rs | 10 +++++ frame/nfts/src/impl_nonfungibles.rs | 15 ++++--- .../src/traits/tokens/nonfungible_v2.rs | 39 +++++++------------ .../src/traits/tokens/nonfungibles_v2.rs | 12 +----- 7 files changed, 41 insertions(+), 80 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index c322f5566f612..44769005a52b5 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -33,13 +33,10 @@ use frame_support::{ parameter_types, traits::{ fungible::ItemOf, - tokens::{ - nonfungibles_v2::{Inspect, LockableNonfungible}, - GetSalary, PayFromAccount, - }, + tokens::{nonfungibles_v2::Inspect, GetSalary, PayFromAccount}, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, - LockIdentifier, Locker, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, + LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, }, weights::{ constants::{ @@ -1649,16 +1646,6 @@ parameter_types! 
{ pub const MaxAttributesPerCall: u32 = 10; } -type ItemId = ::ItemId; -type CollectionId = ::CollectionId; - -pub struct NftLocker; -impl Locker for NftLocker { - fn is_locked(collection: CollectionId, item: ItemId) -> bool { - Nfts::is_locked(&collection, &item) - } -} - impl pallet_nfts::Config for Runtime { type RuntimeEvent = RuntimeEvent; type CollectionId = u32; @@ -1685,7 +1672,7 @@ impl pallet_nfts::Config for Runtime { #[cfg(feature = "runtime-benchmarks")] type Helper = (); type CreateOrigin = AsEnsureOriginWithArg>; - type Locker = NftLocker; + type Locker = (); } impl pallet_transaction_storage::Config for Runtime { diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index 81e063c959d21..4280b1484483e 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -71,7 +71,7 @@ pub mod pallet { Create, Destroy, Inspect, Mutate, }, tokens::{ - nonfungibles_v2::{Inspect as NonFungiblesInspect, LockableNonfungible, Transfer}, + nonfungibles_v2::{Inspect as NonFungiblesInspect, Transfer}, AssetId, Balance as AssetBalance, Fortitude::Polite, Precision::{BestEffort, Exact}, @@ -132,8 +132,7 @@ pub mod pallet { Self::AccountId, ItemId = Self::NftId, CollectionId = Self::NftCollectionId, - > + Transfer - + LockableNonfungible; + > + Transfer; /// The pallet's id, used for deriving its sovereign account ID. #[pallet::constant] @@ -323,7 +322,7 @@ pub mod pallet { /// Transfer the NFT from the account holding that NFT to the pallet's account. fn do_lock_nft(nft_collection_id: T::NftCollectionId, nft_id: T::NftId) -> DispatchResult { - T::Nfts::lock(&nft_collection_id, &nft_id) + T::Nfts::disable_transfer(&nft_collection_id, &nft_id) } /// Transfer the NFT to the account returning the tokens. 
@@ -332,7 +331,7 @@ pub mod pallet { nft_id: T::NftId, account: &T::AccountId, ) -> DispatchResult { - T::Nfts::unlock(&nft_collection_id, &nft_id)?; + T::Nfts::enable_transfer(&nft_collection_id, &nft_id)?; T::Nfts::transfer(&nft_collection_id, &nft_id, account) } diff --git a/frame/nft-fractionalization/src/mock.rs b/frame/nft-fractionalization/src/mock.rs index 59ae3c846db7d..05fbadb039398 100644 --- a/frame/nft-fractionalization/src/mock.rs +++ b/frame/nft-fractionalization/src/mock.rs @@ -23,10 +23,7 @@ use crate as pallet_nft_fractionalization; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ construct_runtime, parameter_types, - traits::{ - tokens::nonfungibles_v2::LockableNonfungible, AsEnsureOriginWithArg, ConstU32, ConstU64, - Locker, - }, + traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, BoundedVec, PalletId, }; use frame_system::EnsureSigned; @@ -44,8 +41,6 @@ type Block = frame_system::mocking::MockBlock; type Signature = MultiSignature; type AccountPublic = ::Signer; type AccountId = ::AccountId; -type ItemId = ::ItemId; -type CollectionId = ::CollectionId; // Configure a mock runtime to test the pallet. construct_runtime!( @@ -139,14 +134,6 @@ parameter_types! 
{ pub storage Features: PalletFeatures = PalletFeatures::all_enabled(); } -// enables the NFTs locker -pub struct TestLocker; -impl Locker for TestLocker { - fn is_locked(collection: CollectionId, item: ItemId) -> bool { - Nfts::is_locked(&collection, &item) - } -} - impl pallet_nfts::Config for Test { type RuntimeEvent = RuntimeEvent; type CollectionId = u32; @@ -154,7 +141,7 @@ impl pallet_nfts::Config for Test { type Currency = Balances; type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = frame_system::EnsureRoot; - type Locker = TestLocker; + type Locker = (); type CollectionDeposit = ConstU64<2>; type ItemDeposit = ConstU64<1>; type MetadataDepositBase = ConstU64<1>; diff --git a/frame/nfts/src/features/transfer.rs b/frame/nfts/src/features/transfer.rs index 00b5d4e76882a..96736b8c7b774 100644 --- a/frame/nfts/src/features/transfer.rs +++ b/frame/nfts/src/features/transfer.rs @@ -30,8 +30,18 @@ impl, I: 'static> Pallet { ) -> DispatchResult { let collection_details = Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + ensure!(!T::Locker::is_locked(collection, item), Error::::ItemLocked); + // validate the item is not locked by another pallet + let attribute = ( + &collection, + Some(item), + AttributeNamespace::Pallet, + &Self::construct_attribute_key(LOCKED_NFT_KEY.encode())?, + ); + ensure!(!Attribute::::contains_key(attribute), Error::::ItemLocked); + let collection_config = Self::get_collection_config(&collection)?; ensure!( collection_config.is_setting_enabled(CollectionSetting::TransferableItems), diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index d64b347c3cb3b..ac1c92a950893 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -117,6 +117,10 @@ impl, I: 'static> Inspect<::AccountId> for Palle /// /// Default implementation is that all items are transferable. 
fn can_transfer(collection: &Self::CollectionId, item: &Self::ItemId) -> bool { + if Self::system_attribute(&collection, &item, LOCKED_NFT_KEY).is_some() { + return false + } + match ( CollectionConfigOf::::get(collection), ItemConfigOf::::get(collection, item), @@ -322,14 +326,8 @@ impl, I: 'static> Transfer for Pallet { ) -> DispatchResult { Self::do_transfer(*collection, *item, destination.clone(), |_, _| Ok(())) } -} -impl, I: 'static> LockableNonfungible for Pallet { - fn is_locked(collection: &Self::CollectionId, item: &Self::ItemId) -> bool { - >::system_attribute(&collection, &item, LOCKED_NFT_KEY) - .is_some() - } - fn lock(collection: &Self::CollectionId, item: &Self::ItemId) -> DispatchResult { + fn disable_transfer(collection: &Self::CollectionId, item: &Self::ItemId) -> DispatchResult { >::set_attribute( collection, item, @@ -337,7 +335,8 @@ impl, I: 'static> LockableNonfungible for Pallet DispatchResult { + + fn enable_transfer(collection: &Self::CollectionId, item: &Self::ItemId) -> DispatchResult { >::clear_attribute( collection, item, diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs index 7fe347015b1b4..c4463e0070f9a 100644 --- a/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -173,20 +173,18 @@ pub trait Mutate: Inspect { } } -/// Trait for transferring non-fungible sets of items. +/// Trait for transferring and controlling the transfer of non-fungible sets of items. pub trait Transfer: Inspect { /// Transfer `item` into `destination` account. fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult; -} - -/// Trait for locking/unlocking non-fungible sets of items. -pub trait LockableNonfungible: Inspect { - /// Returns `true` if the `item`is locked. - fn is_locked(item: &Self::ItemId) -> bool; - /// Disable the `item` transfer. 
- fn lock(item: &Self::ItemId) -> DispatchResult; - /// Re-enable the `item` transfer. - fn unlock(item: &Self::ItemId) -> DispatchResult; + /// Disable the `item` transfer. + /// + /// By default, this is not a supported operation. + fn disable_transfer(item: &Self::ItemId) -> DispatchResult; + /// Re-enable the `item` transfer. + /// + /// By default, this is not a supported operation. + fn enable_transfer(item: &Self::ItemId) -> DispatchResult; } /// Convert a `nonfungibles` trait implementation into a `nonfungible` trait implementation by @@ -322,21 +320,10 @@ impl< fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult { >::transfer(&A::get(), item, destination) } -} - -impl< - F: nonfungibles::LockableNonfungible, - A: Get<>::CollectionId>, - AccountId, - > LockableNonfungible for ItemOf -{ - fn is_locked(item: &Self::ItemId) -> bool { - >::is_locked(&A::get(), item) - } - fn lock(item: &Self::ItemId) -> DispatchResult { - >::lock(&A::get(), item) + fn disable_transfer(item: &Self::ItemId) -> DispatchResult { + >::disable_transfer(&A::get(), item) } - fn unlock(item: &Self::ItemId) -> DispatchResult { - >::unlock(&A::get(), item) + fn enable_transfer(item: &Self::ItemId) -> DispatchResult { + >::enable_transfer(&A::get(), item) } } diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs index e064b898ae6a7..f4c3c22ea9bef 100644 --- a/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -338,26 +338,18 @@ pub trait Transfer: Inspect { item: &Self::ItemId, destination: &AccountId, ) -> DispatchResult; -} - -/// Trait for locking/unlocking non-fungible sets of items. -pub trait LockableNonfungible: Inspect { - /// Disable the `item` of `collection` transfer.
- fn is_locked(_collection: &Self::CollectionId, _item: &Self::ItemId) -> bool { - false - } /// Disable the `item` of `collection` transfer. /// /// By default, this is not a supported operation. - fn lock(_collection: &Self::CollectionId, _item: &Self::ItemId) -> DispatchResult { + fn disable_transfer(_collection: &Self::CollectionId, _item: &Self::ItemId) -> DispatchResult { Err(TokenError::Unsupported.into()) } /// Re-enable the `item` of `collection` transfer. /// /// By default, this is not a supported operation. - fn unlock(_collection: &Self::CollectionId, _item: &Self::ItemId) -> DispatchResult { + fn enable_transfer(_collection: &Self::CollectionId, _item: &Self::ItemId) -> DispatchResult { Err(TokenError::Unsupported.into()) } } From 49902428ce21c91440f59dab47d8bc69ff00dec6 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Thu, 18 May 2023 08:42:19 +0200 Subject: [PATCH 095/101] Use PalletAttributes enum instead of the LOCKED_NFT_KEY --- frame/nfts/src/features/attributes.rs | 15 +++++++++++++++ frame/nfts/src/features/transfer.rs | 11 +++-------- frame/nfts/src/impl_nonfungibles.rs | 13 +++++++------ frame/nfts/src/lib.rs | 1 - frame/nfts/src/types.rs | 2 ++ 5 files changed, 27 insertions(+), 15 deletions(-) diff --git a/frame/nfts/src/features/attributes.rs b/frame/nfts/src/features/attributes.rs index 9098679fa9145..8a9bbe8a61de0 100644 --- a/frame/nfts/src/features/attributes.rs +++ b/frame/nfts/src/features/attributes.rs @@ -394,4 +394,19 @@ impl, I: 'static> Pallet { ) -> Result, DispatchError> { Ok(BoundedVec::try_from(value).map_err(|_| Error::::IncorrectData)?) } + + /// A helper method to check whether a system attribute is set for a given item. 
+ pub fn has_system_attribute( + collection: &T::CollectionId, + item: &T::ItemId, + attribute_key: PalletAttributes, + ) -> Result { + let attribute = ( + &collection, + Some(item), + AttributeNamespace::Pallet, + &Self::construct_attribute_key(attribute_key.encode())?, + ); + Ok(Attribute::::contains_key(attribute)) + } } diff --git a/frame/nfts/src/features/transfer.rs b/frame/nfts/src/features/transfer.rs index 96736b8c7b774..69209e1bb6c4b 100644 --- a/frame/nfts/src/features/transfer.rs +++ b/frame/nfts/src/features/transfer.rs @@ -32,15 +32,10 @@ impl, I: 'static> Pallet { Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; ensure!(!T::Locker::is_locked(collection, item), Error::::ItemLocked); - - // validate the item is not locked by another pallet - let attribute = ( - &collection, - Some(item), - AttributeNamespace::Pallet, - &Self::construct_attribute_key(LOCKED_NFT_KEY.encode())?, + ensure!( + !Self::has_system_attribute(&collection, &item, PalletAttributes::TransferDisabled)?, + Error::::ItemLocked ); - ensure!(!Attribute::::contains_key(attribute), Error::::ItemLocked); let collection_config = Self::get_collection_config(&collection)?; ensure!( diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index ac1c92a950893..a2bb49a947e82 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -117,10 +117,11 @@ impl, I: 'static> Inspect<::AccountId> for Palle /// /// Default implementation is that all items are transferable. 
fn can_transfer(collection: &Self::CollectionId, item: &Self::ItemId) -> bool { - if Self::system_attribute(&collection, &item, LOCKED_NFT_KEY).is_some() { - return false + use PalletAttributes::TransferDisabled; + match Self::has_system_attribute(&collection, &item, TransferDisabled) { + Ok(transfer_disabled) if transfer_disabled => return false, + _ => (), } - match ( CollectionConfigOf::::get(collection), ItemConfigOf::::get(collection, item), @@ -331,8 +332,8 @@ impl, I: 'static> Transfer for Pallet { >::set_attribute( collection, item, - LOCKED_NFT_KEY, - &[1], + &PalletAttributes::::TransferDisabled.encode(), + &[], ) } @@ -340,7 +341,7 @@ impl, I: 'static> Transfer for Pallet { >::clear_attribute( collection, item, - LOCKED_NFT_KEY, + &PalletAttributes::::TransferDisabled.encode(), ) } } diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index e20e13c70d85d..4796819df6d2c 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -63,7 +63,6 @@ pub use weights::WeightInfo; pub const LOG_TARGET: &'static str = "runtime::nfts"; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -pub const LOCKED_NFT_KEY: &[u8; 6] = b"locked"; #[frame_support::pallet] pub mod pallet { diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index fe6d31c12acec..8f36acd286c28 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -346,6 +346,8 @@ pub struct CancelAttributesApprovalWitness { pub enum PalletAttributes { /// Marks an item as being used in order to claim another item. UsedToClaim(CollectionId), + /// Marks an item as being restricted from transferring. + TransferDisabled, } /// Collection's configuration. 
From 2bd4dc910dbd0d98fe9cf0c9542d638949c70693 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Thu, 18 May 2023 15:09:30 +0200 Subject: [PATCH 096/101] Fix benchmark --- frame/nft-fractionalization/src/benchmarking.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/frame/nft-fractionalization/src/benchmarking.rs b/frame/nft-fractionalization/src/benchmarking.rs index 7db497ba7244e..a04e8de12ffe8 100644 --- a/frame/nft-fractionalization/src/benchmarking.rs +++ b/frame/nft-fractionalization/src/benchmarking.rs @@ -24,13 +24,14 @@ use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::{ assert_ok, traits::{ - fungible::{Inspect as InspectFungible, Unbalanced}, + fungible::{Inspect as InspectFungible, Mutate as MutateFungible}, tokens::nonfungibles_v2::{Create, Mutate}, + Get, }, }; use frame_system::RawOrigin as SystemOrigin; use pallet_nfts::{CollectionConfig, CollectionSettings, ItemConfig, MintSettings}; -use sp_runtime::traits::{Bounded, StaticLookup}; +use sp_runtime::traits::StaticLookup; use sp_std::prelude::*; use crate::Pallet as NftFractionalization; @@ -62,8 +63,10 @@ where { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - T::Currency::set_total_issuance(BalanceOf::::max_value()); - assert_ok!(T::Currency::write_balance(&caller, BalanceOf::::max_value())); + let ed = T::Currency::minimum_balance(); + let multiplier = BalanceOf::::from(100u8); + T::Currency::set_balance(&caller, ed * multiplier + T::Deposit::get() * multiplier); + assert_ok!(T::Nfts::create_collection(&caller, &caller, &default_collection_config::())); let collection = T::BenchmarkHelper::collection(0); assert_ok!(T::Nfts::mint_into(&collection, &nft_id, &caller, &ItemConfig::default(), true)); From 7e54a8968a405ea1491a27fbd16e5fb99f2d3f96 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Fri, 19 May 2023 13:49:39 +0200 Subject: [PATCH 097/101] Add missing licence details 
--- frame/nft-fractionalization/Cargo.toml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/frame/nft-fractionalization/Cargo.toml b/frame/nft-fractionalization/Cargo.toml index 96c84c3cd8104..c93089a295e2b 100644 --- a/frame/nft-fractionalization/Cargo.toml +++ b/frame/nft-fractionalization/Cargo.toml @@ -1,11 +1,13 @@ [package] name = "pallet-nft-fractionalization" version = "4.0.0-dev" -description = "FRAME pallet for semi-fungible tokens." authors = ["Parity Technologies "] -homepage = "https://substrate.io" edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet to convert non-fungible to fungible tokens." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] From 042d152167c86aaec8f90ca82af2fd60984db855 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko Date: Fri, 19 May 2023 14:40:47 +0200 Subject: [PATCH 098/101] Update Cargo.toml --- frame/nft-fractionalization/Cargo.toml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/frame/nft-fractionalization/Cargo.toml b/frame/nft-fractionalization/Cargo.toml index c93089a295e2b..917d9c5d345be 100644 --- a/frame/nft-fractionalization/Cargo.toml +++ b/frame/nft-fractionalization/Cargo.toml @@ -13,9 +13,9 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", 
default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } @@ -44,5 +44,9 @@ std = [ "sp-runtime/std", "sp-std/std", ] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] try-runtime = ["frame-support/try-runtime"] From 16f256f5ec84712e2aacd1defc6e1ec7f3b1b80a Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Fri, 19 May 2023 14:16:16 +0000 Subject: [PATCH 099/101] ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_nft_fractionalization --- frame/nft-fractionalization/src/weights.rs | 138 ++++++++++++++++++--- 1 file changed, 118 insertions(+), 20 deletions(-) diff --git a/frame/nft-fractionalization/src/weights.rs b/frame/nft-fractionalization/src/weights.rs index d976e484ddc47..735b648b82615 100644 --- a/frame/nft-fractionalization/src/weights.rs +++ b/frame/nft-fractionalization/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,12 +18,13 @@ //! Autogenerated weights for pallet_nft_fractionalization //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-12-22, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-05-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// target/production/substrate // benchmark // pallet // --steps=50 @@ -42,9 +43,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_nft_fractionalization. pub trait WeightInfo { @@ -55,32 +57,128 @@ pub trait WeightInfo { /// Weights for pallet_nft_fractionalization using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + /// Storage: Nfts Item (r:1 w:0) + /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(83), added: 2558, mode: MaxEncodedLen) + /// Storage: Nfts Collection (r:1 w:1) + /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:1 w:1) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: Assets Asset (r:1 w:1) + /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: Assets Account (r:1 w:1) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Assets Metadata (r:1 w:1) + /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: NftFractionalization NftToAsset (r:0 w:1) + /// 
Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) fn fractionalize() -> Weight { - // Minimum execution time: 44_312 nanoseconds. - Weight::from_parts(25_147_000, 3549) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) + // Proof Size summary in bytes: + // Measured: `609` + // Estimated: `4326` + // Minimum execution time: 177_498_000 picoseconds. + Weight::from_parts(178_803_000, 4326) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(8_u64)) } + /// Storage: NftFractionalization NftToAsset (r:1 w:1) + /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) + /// Storage: Assets Asset (r:1 w:1) + /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: Assets Account (r:1 w:1) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:1 w:1) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: Nfts Collection (r:1 w:1) + /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts CollectionConfigOf (r:1 w:0) + /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: Nfts ItemConfigOf (r:1 w:0) + /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: Nfts Item (r:1 w:1) + /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(83), added: 2558, mode: MaxEncodedLen) + /// Storage: Nfts Account (r:0 w:1) + /// Proof: Nfts Account 
(max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: Nfts ItemPriceOf (r:0 w:1) + /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: Nfts PendingSwapOf (r:0 w:1) + /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn unify() -> Weight { - // Minimum execution time: 31_654 nanoseconds. - Weight::from_parts(25_147_000, 3549) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) + // Proof Size summary in bytes: + // Measured: `1421` + // Estimated: `4326` + // Minimum execution time: 130_284_000 picoseconds. + Weight::from_parts(131_122_000, 4326) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(10_u64)) } } // For backwards compatibility and tests impl WeightInfo for () { + /// Storage: Nfts Item (r:1 w:0) + /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(83), added: 2558, mode: MaxEncodedLen) + /// Storage: Nfts Collection (r:1 w:1) + /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:1 w:1) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: Assets Asset (r:1 w:1) + /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: Assets Account (r:1 w:1) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Assets Metadata (r:1 w:1) + /// Proof: Assets Metadata (max_values: None, 
max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: NftFractionalization NftToAsset (r:0 w:1) + /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) fn fractionalize() -> Weight { - // Minimum execution time: 44_312 nanoseconds. - Weight::from_parts(25_147_000, 3549) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) + // Proof Size summary in bytes: + // Measured: `609` + // Estimated: `4326` + // Minimum execution time: 177_498_000 picoseconds. + Weight::from_parts(178_803_000, 4326) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(8_u64)) } + /// Storage: NftFractionalization NftToAsset (r:1 w:1) + /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) + /// Storage: Assets Asset (r:1 w:1) + /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: Assets Account (r:1 w:1) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:1 w:1) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: Nfts Collection (r:1 w:1) + /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts CollectionConfigOf (r:1 w:0) + /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: Nfts ItemConfigOf (r:1 w:0) + /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: Nfts Item (r:1 w:1) + /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: 
None, max_size: Some(83), added: 2558, mode: MaxEncodedLen) + /// Storage: Nfts Account (r:0 w:1) + /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: Nfts ItemPriceOf (r:0 w:1) + /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: Nfts PendingSwapOf (r:0 w:1) + /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn unify() -> Weight { - // Minimum execution time: 31_654 nanoseconds. - Weight::from_parts(25_147_000, 3549) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) + // Proof Size summary in bytes: + // Measured: `1421` + // Estimated: `4326` + // Minimum execution time: 130_284_000 picoseconds. + Weight::from_parts(131_122_000, 4326) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().writes(10_u64)) } } From 594b2cb3d08bb81409203b57887065294274ea92 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Fri, 19 May 2023 21:51:51 +0200 Subject: [PATCH 100/101] Apply suggestions from code review Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nft-fractionalization/src/lib.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs index 4280b1484483e..c61719c5c707a 100644 --- a/frame/nft-fractionalization/src/lib.rs +++ b/frame/nft-fractionalization/src/lib.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! * NFT Fractionalization pallet +//! # NFT Fractionalization Pallet //! //! This pallet provides the basic functionality that should allow users //! 
to leverage partial ownership, transfers, and sales, of illiquid assets, @@ -30,8 +30,8 @@ //! //! ### Functions //! -//! * `fractionalize`: lock the NFT, create and mint new asset. -//! * `unify`: return 100% of the asset, unlock the NFT. +//! * `fractionalize`: Lock the NFT and create and mint a new fungible asset. +//! * `unify`: Return 100% of the asset and unlock the NFT. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -217,7 +217,7 @@ pub mod pallet { /// - `asset_id`: The ID of the new asset. It must not exist. /// Is used within the context of `pallet_assets`. /// - `beneficiary`: The account that will receive the newly created asset. - /// - `fractions`: The amount to be minted of the newly created asset. + /// - `fractions`: The total issuance of the newly created asset class. /// /// Emits `NftFractionalized` event when successful. #[pallet::call_index(0)] @@ -261,7 +261,7 @@ pub mod pallet { Ok(()) } - /// Burn the whole amount of the asset and return back the locked NFT. + /// Burn the total issuance of the fungible asset and return (unlock) the locked NFT. /// /// The dispatch origin for this call must be Signed. /// From e4cb5ac7c8620105445eab97a3cef78852df94fd Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Fri, 19 May 2023 21:52:13 +0200 Subject: [PATCH 101/101] Update frame/nft-fractionalization/README.md Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/nft-fractionalization/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/nft-fractionalization/README.md b/frame/nft-fractionalization/README.md index 9f0553f855332..180eef22cc46f 100644 --- a/frame/nft-fractionalization/README.md +++ b/frame/nft-fractionalization/README.md @@ -3,4 +3,4 @@ Lock an NFT from `pallet-nfts` and mint fungible assets from `pallet-assets`. 
The NFT gets locked by putting a system-level attribute named `Locked`. This prevents the NFT from being transferred further. -The NFT becomes unlocked when the `Locked` attribute is removed. In order to unify the fungible asset and unlock the NFT, an account must hold the full balance of the asset the NFT was fractionalised into. Holding less of the fungible asset will not allow to unlock the NFT. +The NFT becomes unlocked when the `Locked` attribute is removed. In order to unify the fungible asset and unlock the NFT, an account must hold the full issuance of the asset the NFT was fractionalised into. Holding less of the fungible asset will not allow the unlocking of the NFT.

(); if type_id == sp_std::any::TypeId::of::() { - return Some(frame_support::crate_to_crate_version!()); + return Some(frame_support::crate_to_crate_version!()) } None diff --git a/frame/support/src/hash.rs b/frame/support/src/hash.rs index abfaf72e4dc16..9bdad6d9d59de 100644 --- a/frame/support/src/hash.rs +++ b/frame/support/src/hash.rs @@ -111,7 +111,7 @@ impl ReversibleStorageHasher for Twox64Concat { fn reverse(x: &[u8]) -> &[u8] { if x.len() < 8 { log::error!("Invalid reverse: hash length too short"); - return &[]; + return &[] } &x[8..] } @@ -133,7 +133,7 @@ impl ReversibleStorageHasher for Blake2_128Concat { fn reverse(x: &[u8]) -> &[u8] { if x.len() < 16 { log::error!("Invalid reverse: hash length too short"); - return &[]; + return &[] } &x[16..] } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 94f62815e3da2..f11e33b669be0 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -694,7 +694,7 @@ pub use frame_support_procedural::crate_to_crate_version; #[macro_export] macro_rules! fail { ( $y:expr ) => {{ - return Err($y.into()); + return Err($y.into()) }}; } diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index 89b5dfcbe7b95..397daaa82a677 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -112,9 +112,8 @@ pub fn take_or_else T>( /// Check to see if `key` has an explicit entry in storage. 
pub fn exists(child_info: &ChildInfo, key: &[u8]) -> bool { match child_info.child_type() { - ChildType::ParentKeyId => { - sp_io::default_child_storage::exists(child_info.storage_key(), key) - }, + ChildType::ParentKeyId => + sp_io::default_child_storage::exists(child_info.storage_key(), key), } } @@ -140,9 +139,8 @@ pub fn exists(child_info: &ChildInfo, key: &[u8]) -> bool { #[deprecated = "Use `clear_storage` instead"] pub fn kill_storage(child_info: &ChildInfo, limit: Option) -> KillStorageResult { match child_info.child_type() { - ChildType::ParentKeyId => { - sp_io::default_child_storage::storage_kill(child_info.storage_key(), limit) - }, + ChildType::ParentKeyId => + sp_io::default_child_storage::storage_kill(child_info.storage_key(), limit), } } @@ -187,9 +185,8 @@ pub fn clear_storage( // enabled. // sp_io::default_child_storage::storage_kill(prefix, maybe_limit, maybe_cursor) let r = match child_info.child_type() { - ChildType::ParentKeyId => { - sp_io::default_child_storage::storage_kill(child_info.storage_key(), maybe_limit) - }, + ChildType::ParentKeyId => + sp_io::default_child_storage::storage_kill(child_info.storage_key(), maybe_limit), }; use sp_io::KillStorageResult::*; let (maybe_cursor, backend) = match r { @@ -218,18 +215,16 @@ pub fn get_raw(child_info: &ChildInfo, key: &[u8]) -> Option> { /// Put a raw byte slice into storage. pub fn put_raw(child_info: &ChildInfo, key: &[u8], value: &[u8]) { match child_info.child_type() { - ChildType::ParentKeyId => { - sp_io::default_child_storage::set(child_info.storage_key(), key, value) - }, + ChildType::ParentKeyId => + sp_io::default_child_storage::set(child_info.storage_key(), key, value), } } /// Calculate current child root value. 
pub fn root(child_info: &ChildInfo, version: StateVersion) -> Vec { match child_info.child_type() { - ChildType::ParentKeyId => { - sp_io::default_child_storage::root(child_info.storage_key(), version) - }, + ChildType::ParentKeyId => + sp_io::default_child_storage::root(child_info.storage_key(), version), } } diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index 77d925917cd5f..c95dcee9d7e5c 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -473,7 +473,7 @@ where Some(value) => value, None => { log::error!("Invalid translate: fail to decode old value"); - continue; + continue }, }; let mut key_material = G::Hasher1::reverse(&previous_key[prefix.len()..]); @@ -481,7 +481,7 @@ where Ok(key1) => key1, Err(_) => { log::error!("Invalid translate: fail to decode key1"); - continue; + continue }, }; @@ -490,7 +490,7 @@ where Ok(key2) => key2, Err(_) => { log::error!("Invalid translate: fail to decode key2"); - continue; + continue }, }; diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index beb6805f2be77..f6c8eaa270bb3 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -115,7 +115,7 @@ impl Iter } }, None => None, - }; + } } } } @@ -188,7 +188,7 @@ where Some(value) => value, None => { log::error!("Invalid translate: fail to decode old value"); - continue; + continue }, }; @@ -197,7 +197,7 @@ where Ok(key) => key, Err(_) => { log::error!("Invalid translate: fail to decode key"); - continue; + continue }, }; diff --git a/frame/support/src/storage/generator/nmap.rs b/frame/support/src/storage/generator/nmap.rs index 6162cec1f0a16..79f3d72044e28 100755 --- a/frame/support/src/storage/generator/nmap.rs +++ b/frame/support/src/storage/generator/nmap.rs @@ -430,7 +430,7 @@ impl> Some(value) => value, None => { log::error!("Invalid 
translate: fail to decode old value"); - continue; + continue }, }; @@ -438,7 +438,7 @@ impl> Ok((final_key, _)) => final_key, Err(_) => { log::error!("Invalid translate: fail to decode key"); - continue; + continue }, }; diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index f30f866a9bc77..67001fc4e1f42 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -87,7 +87,7 @@ impl Iterator for StorageIterator { } }, None => None, - }; + } } } } @@ -160,7 +160,7 @@ impl Iterator } }, None => None, - }; + } } } } @@ -372,7 +372,7 @@ pub fn move_pallet(old_pallet_name: &[u8], new_pallet_name: &[u8]) { /// NOTE: The value at the key `from_prefix` is not moved. pub fn move_prefix(from_prefix: &[u8], to_prefix: &[u8]) { if from_prefix == to_prefix { - return; + return } let iter = PrefixIterator::<_> { diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index d20fffc8345a5..333f4382557b1 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -912,7 +912,7 @@ impl Iterator for PrefixIterator Iterator for PrefixIterator None, - }; + } } } } @@ -1010,12 +1010,12 @@ impl Iterator for KeyPrefixIterator { Ok(item) => return Some(item), Err(e) => { log::error!("key failed to decode at {:?}: {:?}", self.previous_key, e); - continue; + continue }, } } - return None; + return None } } } @@ -1125,7 +1125,7 @@ impl Iterator for ChildTriePrefixIterator { "next_key returned a key with no value at {:?}", self.previous_key, ); - continue; + continue }, }; if self.drain { @@ -1140,14 +1140,14 @@ impl Iterator for ChildTriePrefixIterator { self.previous_key, e, ); - continue; + continue }, }; Some(item) }, None => None, - }; + } } } } @@ -1255,7 +1255,7 @@ pub trait StoragePrefixedMap { }, None => { log::error!("old key failed to decode at {:?}", previous_key); - continue; + continue }, } } diff --git 
a/frame/support/src/storage/storage_noop_guard.rs b/frame/support/src/storage/storage_noop_guard.rs index 6a5804723dce0..7186c3eaf467a 100644 --- a/frame/support/src/storage/storage_noop_guard.rs +++ b/frame/support/src/storage/storage_noop_guard.rs @@ -49,7 +49,7 @@ impl Drop for StorageNoopGuard { fn drop(&mut self) { // No need to double panic, eg. inside a test assertion failure. if sp_std::thread::panicking() { - return; + return } assert_eq!( frame_support::storage_root(frame_support::StateVersion::V1), diff --git a/frame/support/src/storage/transactional.rs b/frame/support/src/storage/transactional.rs index d283a434a0f82..909d5909ed8bd 100644 --- a/frame/support/src/storage/transactional.rs +++ b/frame/support/src/storage/transactional.rs @@ -57,7 +57,7 @@ fn kill_transaction_level() { fn inc_transaction_level() -> Result { let existing_levels = get_transaction_level(); if existing_levels >= TRANSACTIONAL_LIMIT { - return Err(()); + return Err(()) } // Cannot overflow because of check above. 
set_transaction_level(existing_levels + 1); @@ -232,7 +232,7 @@ mod tests { fn recursive_transactional(num: u32) -> DispatchResult { if num == 0 { - return Ok(()); + return Ok(()) } with_transaction(|| -> TransactionOutcome { diff --git a/frame/support/src/traits/tokens/fungible/balanced.rs b/frame/support/src/traits/tokens/fungible/balanced.rs index ed1a032283bf5..0e75ccc22d050 100644 --- a/frame/support/src/traits/tokens/fungible/balanced.rs +++ b/frame/support/src/traits/tokens/fungible/balanced.rs @@ -165,7 +165,7 @@ pub trait Unbalanced: Inspect { ) -> Result { let old_balance = Self::balance(who); let (mut new_balance, mut amount) = if Self::reducible_balance(who, false) < amount { - return Err(TokenError::NoFunds.into()); + return Err(TokenError::NoFunds.into()) } else { (old_balance - amount, amount) }; @@ -226,7 +226,7 @@ pub trait Unbalanced: Inspect { let old_balance = Self::balance(who); let new_balance = old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)?; if new_balance < Self::minimum_balance() { - return Err(TokenError::BelowMinimum.into()); + return Err(TokenError::BelowMinimum.into()) } if old_balance != new_balance { Self::set_balance(who, new_balance)?; diff --git a/frame/support/src/traits/tokens/fungibles/balanced.rs b/frame/support/src/traits/tokens/fungibles/balanced.rs index f1b9144093690..9e50ff834a874 100644 --- a/frame/support/src/traits/tokens/fungibles/balanced.rs +++ b/frame/support/src/traits/tokens/fungibles/balanced.rs @@ -186,7 +186,7 @@ pub trait Unbalanced: Inspect { ) -> Result { let old_balance = Self::balance(asset, who); let (mut new_balance, mut amount) = if Self::reducible_balance(asset, who, false) < amount { - return Err(TokenError::NoFunds.into()); + return Err(TokenError::NoFunds.into()) } else { (old_balance - amount, amount) }; @@ -252,7 +252,7 @@ pub trait Unbalanced: Inspect { let old_balance = Self::balance(asset, who); let new_balance = 
old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)?; if new_balance < Self::minimum_balance(asset) { - return Err(TokenError::BelowMinimum.into()); + return Err(TokenError::BelowMinimum.into()) } if old_balance != new_balance { Self::set_balance(asset, who, new_balance)?; diff --git a/frame/support/src/traits/tokens/imbalance.rs b/frame/support/src/traits/tokens/imbalance.rs index 25c8503131fba..d721beb41494c 100644 --- a/frame/support/src/traits/tokens/imbalance.rs +++ b/frame/support/src/traits/tokens/imbalance.rs @@ -83,7 +83,7 @@ pub trait Imbalance: Sized + TryDrop + Default { { let total: u32 = first.saturating_add(second); if total == 0 { - return (Self::zero(), Self::zero()); + return (Self::zero(), Self::zero()) } let amount1 = self.peek().saturating_mul(first.into()) / total.into(); self.split(amount1) diff --git a/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs b/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs index dfa27068c7cb1..f969a4363405a 100644 --- a/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs +++ b/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs @@ -54,12 +54,10 @@ impl< /// both. 
pub fn merge(self, other: Self) -> Self { match (self, other) { - (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => { - SignedImbalance::Positive(one.merge(other)) - }, - (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => { - SignedImbalance::Negative(one.merge(other)) - }, + (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => + SignedImbalance::Positive(one.merge(other)), + (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => + SignedImbalance::Negative(one.merge(other)), (SignedImbalance::Positive(one), SignedImbalance::Negative(other)) => { match one.offset(other) { SameOrOther::Same(positive) => SignedImbalance::Positive(positive), diff --git a/frame/support/src/traits/try_runtime.rs b/frame/support/src/traits/try_runtime.rs index cf169426aa996..640bb566a65af 100644 --- a/frame/support/src/traits/try_runtime.rs +++ b/frame/support/src/traits/try_runtime.rs @@ -66,7 +66,7 @@ impl sp_std::str::FromStr for Select { match s { "all" | "All" => Ok(Select::All), "none" | "None" => Ok(Select::None), - _ => { + _ => if s.starts_with("rr-") { let count = s .split_once('-') @@ -76,8 +76,7 @@ impl sp_std::str::FromStr for Select { } else { let pallets = s.split(',').map(|x| x.as_bytes().to_vec()).collect::>(); Ok(Select::Only(pallets)) - } - }, + }, } } } diff --git a/frame/support/test/tests/construct_runtime_ui.rs b/frame/support/test/tests/construct_runtime_ui.rs index d643eaeac7e37..42fd87ca95c0e 100644 --- a/frame/support/test/tests/construct_runtime_ui.rs +++ b/frame/support/test/tests/construct_runtime_ui.rs @@ -21,7 +21,7 @@ fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return; + return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
diff --git a/frame/support/test/tests/decl_module_ui.rs b/frame/support/test/tests/decl_module_ui.rs index 9328b7b63cd1e..292451335e7ea 100644 --- a/frame/support/test/tests/decl_module_ui.rs +++ b/frame/support/test/tests/decl_module_ui.rs @@ -21,7 +21,7 @@ fn decl_module_ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return; + return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/frame/support/test/tests/decl_storage_ui.rs b/frame/support/test/tests/decl_storage_ui.rs index 40487273ad5ae..34dfea8601ab9 100644 --- a/frame/support/test/tests/decl_storage_ui.rs +++ b/frame/support/test/tests/decl_storage_ui.rs @@ -21,7 +21,7 @@ fn decl_storage_ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return; + return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/frame/support/test/tests/derive_no_bound_ui.rs b/frame/support/test/tests/derive_no_bound_ui.rs index d6eda198c9a9b..d714e1113625a 100644 --- a/frame/support/test/tests/derive_no_bound_ui.rs +++ b/frame/support/test/tests/derive_no_bound_ui.rs @@ -21,7 +21,7 @@ fn derive_no_bound_ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return; + return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 5a29b70e134a1..0fd32dad2242a 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -407,7 +407,7 @@ pub mod pallet { let _ = T::AccountId::from(SomeType1); // Test for where clause let _ = T::AccountId::from(SomeType5); // Test for where clause if matches!(call, Call::foo_storage_layer { .. 
}) { - return Ok(ValidTransaction::default()); + return Ok(ValidTransaction::default()) } Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) } @@ -1552,9 +1552,9 @@ fn metadata() { }, ]; - let empty_doc = pallets[0].event.as_ref().unwrap().ty.type_info().docs().is_empty() - && pallets[0].error.as_ref().unwrap().ty.type_info().docs().is_empty() - && pallets[0].calls.as_ref().unwrap().ty.type_info().docs().is_empty(); + let empty_doc = pallets[0].event.as_ref().unwrap().ty.type_info().docs().is_empty() && + pallets[0].error.as_ref().unwrap().ty.type_info().docs().is_empty() && + pallets[0].calls.as_ref().unwrap().ty.type_info().docs().is_empty(); if cfg!(feature = "no-metadata-docs") { assert!(empty_doc) diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index 209abac8a3368..398137d644ee4 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -288,9 +288,8 @@ mod test { fn metadata() { let metadata = Runtime::metadata(); let (pallets, types) = match metadata.1 { - frame_support::metadata::RuntimeMetadata::V14(metadata) => { - (metadata.pallets, metadata.types) - }, + frame_support::metadata::RuntimeMetadata::V14(metadata) => + (metadata.pallets, metadata.types), _ => unreachable!(), }; diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index 762a2e729f1e3..e8b5fe9fa33d4 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -292,9 +292,8 @@ mod test { fn metadata() { let metadata = Runtime::metadata(); let (pallets, types) = match metadata.1 { - frame_support::metadata::RuntimeMetadata::V14(metadata) => { - (metadata.pallets, metadata.types) - }, + frame_support::metadata::RuntimeMetadata::V14(metadata) => + (metadata.pallets, metadata.types), _ => unreachable!(), }; 
diff --git a/frame/support/test/tests/pallet_ui.rs b/frame/support/test/tests/pallet_ui.rs index 428341f2262b7..2db1d3cb0543a 100644 --- a/frame/support/test/tests/pallet_ui.rs +++ b/frame/support/test/tests/pallet_ui.rs @@ -21,7 +21,7 @@ fn pallet_ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return; + return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/frame/support/test/tests/storage_alias_ui.rs b/frame/support/test/tests/storage_alias_ui.rs index 7243f7ea0206e..d45d071578dab 100644 --- a/frame/support/test/tests/storage_alias_ui.rs +++ b/frame/support/test/tests/storage_alias_ui.rs @@ -21,7 +21,7 @@ fn storage_alias_ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return; + return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/frame/system/src/extensions/check_non_zero_sender.rs b/frame/system/src/extensions/check_non_zero_sender.rs index 36e85202d68ea..036f70c2fdd48 100644 --- a/frame/system/src/extensions/check_non_zero_sender.rs +++ b/frame/system/src/extensions/check_non_zero_sender.rs @@ -83,7 +83,7 @@ where _len: usize, ) -> TransactionValidity { if who.using_encoded(|d| d.iter().all(|x| *x == 0)) { - return Err(TransactionValidityError::Invalid(InvalidTransaction::BadSigner)); + return Err(TransactionValidityError::Invalid(InvalidTransaction::BadSigner)) } Ok(ValidTransaction::default()) } diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index 601c053be5a24..1616a2d8a119e 100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -86,7 +86,7 @@ where } else { InvalidTransaction::Future } - .into()); + .into()) } account.nonce += T::Index::one(); crate::Account::::insert(who, account); @@ -103,7 +103,7 @@ where // check index let account = 
crate::Account::::get(who); if self.0 < account.nonce { - return InvalidTransaction::Stale.into(); + return InvalidTransaction::Stale.into() } let provides = vec![Encode::encode(&(who, self.0))]; diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index b0c39e63e01ff..5c3b80f59bfa8 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -50,9 +50,8 @@ where ) -> Result<(), TransactionValidityError> { let max = T::BlockWeights::get().get(info.class).max_extrinsic; match max { - Some(max) if info.weight.any_gt(max) => { - Err(InvalidTransaction::ExhaustsResources.into()) - }, + Some(max) if info.weight.any_gt(max) => + Err(InvalidTransaction::ExhaustsResources.into()), _ => Ok(()), } } @@ -147,9 +146,8 @@ where // Check if we don't exceed per-class allowance match limit_per_class.max_total { - Some(max) if per_class.any_gt(max) => { - return Err(InvalidTransaction::ExhaustsResources.into()) - }, + Some(max) if per_class.any_gt(max) => + return Err(InvalidTransaction::ExhaustsResources.into()), // There is no `max_total` limit (`None`), // or we are below the limit. _ => {}, @@ -160,9 +158,8 @@ where if all_weight.total().any_gt(maximum_weight.max_block) { match limit_per_class.reserved { // We are over the limit in reserved pool. - Some(reserved) if per_class.any_gt(reserved) => { - return Err(InvalidTransaction::ExhaustsResources.into()) - }, + Some(reserved) if per_class.any_gt(reserved) => + return Err(InvalidTransaction::ExhaustsResources.into()), // There is either no limit in reserved pool (`None`), // or we are below the limit. 
_ => {}, @@ -194,7 +191,7 @@ where len: usize, ) -> Result<(), TransactionValidityError> { if info.class == DispatchClass::Mandatory { - return Err(InvalidTransaction::MandatoryDispatch.into()); + return Err(InvalidTransaction::MandatoryDispatch.into()) } Self::do_pre_dispatch(info, len) } @@ -207,7 +204,7 @@ where len: usize, ) -> TransactionValidity { if info.class == DispatchClass::Mandatory { - return Err(InvalidTransaction::MandatoryDispatch.into()); + return Err(InvalidTransaction::MandatoryDispatch.into()) } Self::do_validate(info, len) } @@ -240,7 +237,7 @@ where // extrinsics that result in error. if let (DispatchClass::Mandatory, Err(e)) = (info.class, result) { log::error!(target: "runtime::system", "Bad mandatory: {:?}", e); - return Err(InvalidTransaction::BadMandatory.into()); + return Err(InvalidTransaction::BadMandatory.into()) } let unspent = post_info.calc_unspent(info); @@ -324,8 +321,8 @@ mod tests { fn normal_extrinsic_limited_by_maximum_extrinsic_weight() { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() - + Weight::from_ref_time(1), + weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + + Weight::from_ref_time(1), class: DispatchClass::Normal, ..Default::default() }; @@ -615,9 +612,9 @@ mod tests { let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); assert_eq!( BlockWeight::::get().total(), - info.weight - + Weight::from_ref_time(128) - + block_weights().get(DispatchClass::Normal).base_extrinsic, + info.weight + + Weight::from_ref_time(128) + + block_weights().get(DispatchClass::Normal).base_extrinsic, ); assert_ok!(CheckWeight::::post_dispatch( @@ -629,9 +626,9 @@ mod tests { )); assert_eq!( BlockWeight::::get().total(), - info.weight - + Weight::from_ref_time(128) - + block_weights().get(DispatchClass::Normal).base_extrinsic, + info.weight + + Weight::from_ref_time(128) + + 
block_weights().get(DispatchClass::Normal).base_extrinsic, ); }) } diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 4b0607baff61a..477ebb97fbd95 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1219,7 +1219,7 @@ impl Pallet { let block_number = Self::block_number(); // Don't populate events on genesis. if block_number.is_zero() { - return; + return } let phase = ExecutionPhase::::get().unwrap_or_default(); @@ -1561,11 +1561,11 @@ impl Pallet { .ok_or(Error::::FailedToExtractRuntimeVersion)?; if new_version.spec_name != current_version.spec_name { - return Err(Error::::InvalidSpecName.into()); + return Err(Error::::InvalidSpecName.into()) } if new_version.spec_version <= current_version.spec_version { - return Err(Error::::SpecVersionNeedsToIncrease.into()); + return Err(Error::::SpecVersionNeedsToIncrease.into()) } Ok(()) @@ -1650,7 +1650,7 @@ impl StoredMap for Pallet { }, } } else if !was_providing && !is_providing { - return Ok(result); + return Ok(result) } Account::::mutate(k, |a| a.data = some_data.unwrap_or_default()); Ok(result) diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index 14bf4b2203373..99a4c1541d30f 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -207,7 +207,7 @@ impl> Signer, I: 'static>(length: u32) -> (T::AccountId, Vec, T::AccountId) { let caller = whitelisted_caller(); - let value = T::TipReportDepositBase::get() - + T::DataDepositPerByte::get() * length.into() - + T::Currency::minimum_balance(); + let value = T::TipReportDepositBase::get() + + T::DataDepositPerByte::get() * length.into() + + T::Currency::minimum_balance(); let _ = T::Currency::make_free_balance_be(&caller, value); let reason = vec![0; length as usize]; let awesome_person = account("awesome", 0, SEED); diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs index 74233e1dec5e1..9313a26e52e00 100644 --- a/frame/tips/src/lib.rs +++ b/frame/tips/src/lib.rs @@ -254,8 +254,8 @@ 
pub mod pallet { let hash = T::Hashing::hash_of(&(&reason_hash, &who)); ensure!(!Tips::::contains_key(&hash), Error::::AlreadyKnown); - let deposit = T::TipReportDepositBase::get() - + T::DataDepositPerByte::get() * (reason.len() as u32).into(); + let deposit = T::TipReportDepositBase::get() + + T::DataDepositPerByte::get() * (reason.len() as u32).into(); T::Currency::reserve(&finder, deposit)?; Reasons::::insert(&reason_hash, &reason); @@ -510,9 +510,9 @@ impl, I: 'static> Pallet { Some(m) => { member = members_iter.next(); if m < a { - continue; + continue } else { - break true; + break true } }, } diff --git a/frame/tips/src/migrations/v4.rs b/frame/tips/src/migrations/v4.rs index c0aacf8417405..5e10fa7dd2c6d 100644 --- a/frame/tips/src/migrations/v4.rs +++ b/frame/tips/src/migrations/v4.rs @@ -49,7 +49,7 @@ pub fn migrate::on_chain_storage_version(); @@ -109,7 +109,7 @@ pub fn pre_migrate< log_migration("pre-migration", storage_prefix_reasons, old_pallet_name, new_pallet_name); if new_pallet_name == old_pallet_name { - return; + return } let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); @@ -148,7 +148,7 @@ pub fn post_migrate< log_migration("post-migration", storage_prefix_reasons, old_pallet_name, new_pallet_name); if new_pallet_name == old_pallet_name { - return; + return } // Assert that no `Tips` and `Reasons` storages remains at the old prefix. 
diff --git a/frame/transaction-payment/asset-tx-payment/src/payment.rs b/frame/transaction-payment/asset-tx-payment/src/payment.rs index f7905cd6fcdd6..80ff4e40dcffa 100644 --- a/frame/transaction-payment/asset-tx-payment/src/payment.rs +++ b/frame/transaction-payment/asset-tx-payment/src/payment.rs @@ -130,7 +130,7 @@ where let can_withdraw = >::can_withdraw(asset_id, who, converted_fee); if !matches!(can_withdraw, WithdrawConsequence::Success) { - return Err(InvalidTransaction::Payment.into()); + return Err(InvalidTransaction::Payment.into()) } >::withdraw(asset_id, who, converted_fee) .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment)) diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index d271f585900d7..ce747fa6bd85c 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -394,15 +394,15 @@ pub mod pallet { // at most be maximum block weight. Make sure that this can fit in a multiplier without // loss. assert!( - ::max_value() - >= Multiplier::checked_from_integer::( + ::max_value() >= + Multiplier::checked_from_integer::( T::BlockWeights::get().max_block.ref_time().try_into().unwrap() ) .unwrap(), ); - let target = T::FeeMultiplierUpdate::target() - * T::BlockWeights::get().get(DispatchClass::Normal).max_total.expect( + let target = T::FeeMultiplierUpdate::target() * + T::BlockWeights::get().get(DispatchClass::Normal).max_total.expect( "Setting `max_total` for `Normal` dispatch class is not compatible with \ `transaction-payment` pallet.", ); @@ -411,7 +411,7 @@ pub mod pallet { if addition == Weight::zero() { // this is most likely because in a test setup we set everything to () // or to `ConstFeeMultiplier`. - return; + return } #[cfg(any(feature = "std", test))] @@ -1178,9 +1178,8 @@ mod tests { // fee will be proportional to what is the actual maximum weight in the runtime. 
assert_eq!( Balances::free_balance(&1), - (10000 - - ::BlockWeights::get().max_block.ref_time()) - as u64 + (10000 - + ::BlockWeights::get().max_block.ref_time()) as u64 ); }); } diff --git a/frame/transaction-payment/src/payment.rs b/frame/transaction-payment/src/payment.rs index 97b92d4b9e34f..ebc9c5c5afd62 100644 --- a/frame/transaction-payment/src/payment.rs +++ b/frame/transaction-payment/src/payment.rs @@ -98,7 +98,7 @@ where tip: Self::Balance, ) -> Result { if fee.is_zero() { - return Ok(None); + return Ok(None) } let withdraw_reason = if tip.is_zero() { diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index f02cae80e9ad8..07144c5617113 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -211,7 +211,7 @@ pub mod pallet { let mut index = 0; >::mutate(|transactions| { if transactions.len() + 1 > T::MaxBlockTransactions::get() as usize { - return Err(Error::::TooManyTransactions); + return Err(Error::::TooManyTransactions) } let total_chunks = transactions.last().map_or(0, |t| t.block_chunks) + chunk_count; index = transactions.len() as u32; @@ -255,7 +255,7 @@ pub mod pallet { let mut index = 0; >::mutate(|transactions| { if transactions.len() + 1 > T::MaxBlockTransactions::get() as usize { - return Err(Error::::TooManyTransactions); + return Err(Error::::TooManyTransactions) } let chunks = num_chunks(info.size); let total_chunks = transactions.last().map_or(0, |t| t.block_chunks) + chunks; diff --git a/frame/uniques/src/impl_nonfungibles.rs b/frame/uniques/src/impl_nonfungibles.rs index f2d44a7e88e21..75d193ad19605 100644 --- a/frame/uniques/src/impl_nonfungibles.rs +++ b/frame/uniques/src/impl_nonfungibles.rs @@ -137,7 +137,7 @@ impl, I: 'static> Mutate<::AccountId> for Pallet Self::do_burn(*collection, *item, |_, d| { if let Some(check_owner) = maybe_check_owner { if &d.owner != check_owner { - return Err(Error::::NoPermission.into()); + return 
Err(Error::::NoPermission.into()) } } Ok(()) diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 0bc1c8bf22b86..185d8fc0c8edd 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -687,10 +687,10 @@ pub mod pallet { if T::Currency::reserve(&collection_details.owner, deposit - old).is_err() { // NOTE: No alterations made to collection_details in this iteration so far, // so this is OK to do. - continue; + continue } } else { - continue; + continue } collection_details.total_deposit.saturating_accrue(deposit); collection_details.total_deposit.saturating_reduce(old); @@ -851,7 +851,7 @@ pub mod pallet { let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; ensure!(origin == details.owner, Error::::NoPermission); if details.owner == owner { - return Ok(()); + return Ok(()) } // Move the deposit to the new owner. diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 769c69475b9f2..41710be930b90 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -132,9 +132,9 @@ pub mod pallet { /// The limit on the number of batched calls. fn batched_calls_limit() -> u32 { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; - let call_size = ((sp_std::mem::size_of::<::RuntimeCall>() as u32 - + CALL_ALIGN - 1) - / CALL_ALIGN) * CALL_ALIGN; + let call_size = ((sp_std::mem::size_of::<::RuntimeCall>() as u32 + + CALL_ALIGN - 1) / CALL_ALIGN) * + CALL_ALIGN; // The margin to take into account vec doubling capacity. let margin_factor = 3; @@ -205,7 +205,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { - return Err(BadOrigin.into()); + return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); @@ -232,7 +232,7 @@ pub mod pallet { // Take the weight of this function itself into account. 
let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. - return Ok(Some(base_weight + weight).into()); + return Ok(Some(base_weight + weight).into()) } Self::deposit_event(Event::ItemCompleted); } @@ -326,7 +326,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { - return Err(BadOrigin.into()); + return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); @@ -438,7 +438,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { - return Err(BadOrigin.into()); + return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index bf13793a3b43a..a92f94baf6cf9 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -428,7 +428,7 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; if schedule1_index == schedule2_index { - return Ok(()); + return Ok(()) }; let schedule1_index = schedule1_index as usize; let schedule2_index = schedule2_index as usize; @@ -506,7 +506,7 @@ impl Pallet { // Validate user inputs. 
ensure!(schedule.locked() >= T::MinVestedTransfer::get(), Error::::AmountLow); if !schedule.is_valid() { - return Err(Error::::InvalidScheduleParams.into()); + return Err(Error::::InvalidScheduleParams.into()) }; let target = T::Lookup::lookup(target)?; let source = T::Lookup::lookup(source)?; @@ -654,8 +654,8 @@ impl Pallet { }; debug_assert!( - locked_now > Zero::zero() && schedules.len() > 0 - || locked_now == Zero::zero() && schedules.len() == 0 + locked_now > Zero::zero() && schedules.len() > 0 || + locked_now == Zero::zero() && schedules.len() == 0 ); Ok((schedules, locked_now)) @@ -701,13 +701,13 @@ where starting_block: T::BlockNumber, ) -> DispatchResult { if locked.is_zero() { - return Ok(()); + return Ok(()) } let vesting_schedule = VestingInfo::new(locked, per_block, starting_block); // Check for `per_block` or `locked` of 0. if !vesting_schedule.is_valid() { - return Err(Error::::InvalidScheduleParams.into()); + return Err(Error::::InvalidScheduleParams.into()) }; let mut schedules = Self::vesting(who).unwrap_or_default(); @@ -735,7 +735,7 @@ where ) -> DispatchResult { // Check for `per_block` or `locked` of 0. if !VestingInfo::new(locked, per_block, starting_block).is_valid() { - return Err(Error::::InvalidScheduleParams.into()); + return Err(Error::::InvalidScheduleParams.into()) } ensure!( diff --git a/frame/vesting/src/tests.rs b/frame/vesting/src/tests.rs index 41d01a1c3079a..cbc2e09c83199 100644 --- a/frame/vesting/src/tests.rs +++ b/frame/vesting/src/tests.rs @@ -1131,16 +1131,16 @@ fn vested_transfer_less_than_existential_deposit_fails() { ExtBuilder::default().existential_deposit(4 * ED).build().execute_with(|| { // MinVestedTransfer is less the ED. 
assert!( - ::Currency::minimum_balance() - > ::MinVestedTransfer::get() + ::Currency::minimum_balance() > + ::MinVestedTransfer::get() ); let sched = VestingInfo::new(::MinVestedTransfer::get() as u64, 1u64, 10u64); // The new account balance with the schedule's locked amount would be less than ED. assert!( - Balances::free_balance(&99) + sched.locked() - < ::Currency::minimum_balance() + Balances::free_balance(&99) + sched.locked() < + ::Currency::minimum_balance() ); // vested_transfer fails. diff --git a/frame/vesting/src/vesting_info.rs b/frame/vesting/src/vesting_info.rs index 2a05f25be491a..9069b69482769 100644 --- a/frame/vesting/src/vesting_info.rs +++ b/frame/vesting/src/vesting_info.rs @@ -99,8 +99,8 @@ where // the block after starting. One::one() } else { - self.locked / self.per_block() - + if (self.locked % self.per_block()).is_zero() { + self.locked / self.per_block() + + if (self.locked % self.per_block()).is_zero() { Zero::zero() } else { // `per_block` does not perfectly divide `locked`, so we need an extra block to diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index c71e7c0df584e..8d46047dbda5a 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -190,7 +190,7 @@ fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { ); match meta { - Meta::List(list) => { + Meta::List(list) => if list.nested.len() > 2 && list.nested.is_empty() { err } else { @@ -206,8 +206,7 @@ fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { }; Ok((old_name, version)) - } - }, + }, _ => err, } } @@ -680,7 +679,7 @@ impl CheckTraitDecl { Ok(r) => r, Err(e) => { self.errors.push(e); - return; + return }, }; @@ -722,13 +721,12 @@ impl<'ast> Visit<'ast> for CheckTraitDecl { fn visit_generic_param(&mut self, input: &'ast GenericParam) { match input { - GenericParam::Type(ty) if ty.ident == 
BLOCK_GENERIC_IDENT => { + GenericParam::Type(ty) if ty.ident == BLOCK_GENERIC_IDENT => self.errors.push(Error::new( input.span(), "`Block: BlockT` generic parameter will be added automatically by the \ `decl_runtime_apis!` macro!", - )) - }, + )), _ => {}, } diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 7656c710fb950..c3f4e36655d22 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -366,7 +366,7 @@ fn extend_with_api_version(mut trait_: Path, version: Option) -> Path { v } else { // nothing to do - return trait_; + return trait_ }; let trait_name = &mut trait_ @@ -598,7 +598,7 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { "Two traits with the same name detected! \ The trait name is used to generate its ID. \ Please rename one trait at the declaration!", - )); + )) } let id: Path = parse_quote!( #path ID ); @@ -681,7 +681,7 @@ fn extract_api_version(attrs: &Vec, span: Span) -> Result Each runtime API can have only one version.", API_VERSION_ATTRIBUTE ), - )); + )) } // Parse the runtime version if there exists one. 
diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 27c91857d0f93..e43a302e18923 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -225,7 +225,7 @@ fn get_at_param_name( takes at least one argument, the `BlockId`.", ADVANCED_ATTRIBUTE, ), - )); + )) } // `param_names` and `param_types` have the same length, so if `param_names` is not empty @@ -236,7 +236,7 @@ fn get_at_param_name( return Err(Error::new( span, "`BlockId` needs to be taken by reference and not by value!", - )); + )) } let name = param_names.remove(0); @@ -415,7 +415,7 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result { + Some(self_ty) => if self_ty == impl_.self_ty { Some(self_ty) } else { @@ -426,14 +426,13 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result Some(impl_.self_ty.clone()), }; global_block_type = match global_block_type.take() { - Some(global_block_type) => { + Some(global_block_type) => if global_block_type == *block_type { Some(global_block_type) } else { @@ -447,9 +446,8 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result Some(block_type.clone()), }; diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 737015cc7e662..2ccd050cfb151 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -151,14 +151,12 @@ pub fn extract_parameter_names_types_and_borrows( generate_unique_pattern((*arg.pat).clone(), &mut generated_pattern_counter); result.push((name, ty, borrow)); }, - FnArg::Receiver(_) if matches!(allow_self, AllowSelfRefInParameters::No) => { - return Err(Error::new(input.span(), "`self` parameter not supported!")) - }, - FnArg::Receiver(recv) => { + FnArg::Receiver(_) if matches!(allow_self, AllowSelfRefInParameters::No) => + return Err(Error::new(input.span(), "`self` parameter not supported!")), 
+ FnArg::Receiver(recv) => if recv.mutability.is_some() || recv.reference.is_none() { - return Err(Error::new(recv.span(), "Only `&self` is supported!")); - } - }, + return Err(Error::new(recv.span(), "Only `&self` is supported!")) + }, } } @@ -224,9 +222,8 @@ pub fn extract_block_type_from_trait_path(trait_: &Path) -> Result<&TypePath> { let span = trait_.segments.last().as_ref().unwrap().span(); Err(Error::new(span, "Missing `Block` generic parameter.")) }, - PathArguments::Parenthesized(_) => { - Err(Error::new(generics.arguments.span(), "Unexpected parentheses in path!")) - }, + PathArguments::Parenthesized(_) => + Err(Error::new(generics.arguments.span(), "Unexpected parentheses in path!")), } } diff --git a/primitives/api/test/tests/trybuild.rs b/primitives/api/test/tests/trybuild.rs index 7ef35f6452555..13af1ded7dc6b 100644 --- a/primitives/api/test/tests/trybuild.rs +++ b/primitives/api/test/tests/trybuild.rs @@ -22,7 +22,7 @@ use std::env; fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if env::var("RUN_UI_TESTS").is_err() { - return; + return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
diff --git a/primitives/arithmetic/fuzzer/src/biguint.rs b/primitives/arithmetic/fuzzer/src/biguint.rs index 61fd3fff9d5db..f49743a4b8a69 100644 --- a/primitives/arithmetic/fuzzer/src/biguint.rs +++ b/primitives/arithmetic/fuzzer/src/biguint.rs @@ -106,7 +106,7 @@ fn main() { if check_digit_lengths(&u, &v, 4) { let (ue, ve) = (ue.unwrap(), ve.unwrap()); if ve == 0 { - return; + return } let (q, r) = (ue / ve, ue % ve); if let Some((qq, rr)) = u.clone().div(&v, true) { diff --git a/primitives/arithmetic/fuzzer/src/fixed_point.rs b/primitives/arithmetic/fuzzer/src/fixed_point.rs index de8f046d8bb26..c1b93f8c63a11 100644 --- a/primitives/arithmetic/fuzzer/src/fixed_point.rs +++ b/primitives/arithmetic/fuzzer/src/fixed_point.rs @@ -76,8 +76,8 @@ fn main() { let a = FixedI64::saturating_from_rational(2, 5); let b = a.saturating_mul_acc_int(x); let xx = FixedI64::saturating_from_integer(x); - let d = a.saturating_mul(xx).saturating_add(xx).into_inner() as i128 - / FixedI64::accuracy() as i128; + let d = a.saturating_mul(xx).saturating_add(xx).into_inner() as i128 / + FixedI64::accuracy() as i128; assert_eq!(b, d); }); } diff --git a/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs b/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs index c5939fd7601c1..474b2d363eccd 100644 --- a/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs +++ b/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs @@ -62,7 +62,7 @@ fn main() { fn mul_div(a: u128, b: u128, c: u128) -> u128 { use primitive_types::U256; if a.is_zero() { - return Zero::zero(); + return Zero::zero() } let c = c.max(1); diff --git a/primitives/arithmetic/fuzzer/src/normalize.rs b/primitives/arithmetic/fuzzer/src/normalize.rs index afd92b05517c5..3d2d6eb9acfcc 100644 --- a/primitives/arithmetic/fuzzer/src/normalize.rs +++ b/primitives/arithmetic/fuzzer/src/normalize.rs @@ -36,7 +36,7 @@ fn main() { fuzz!(|data: (Vec, Ty)| { let (data, norm) = 
data; if data.is_empty() { - return; + return } let pre_sum: u128 = data.iter().map(|x| *x as u128).sum(); diff --git a/primitives/arithmetic/src/biguint.rs b/primitives/arithmetic/src/biguint.rs index ded04f09ea7a1..33f0960ee378c 100644 --- a/primitives/arithmetic/src/biguint.rs +++ b/primitives/arithmetic/src/biguint.rs @@ -159,7 +159,7 @@ impl BigUint { // has the ability to cause this. There is nothing to do if the number already has 1 // limb only. call it a day and return. if self.len().is_zero() { - return; + return } let index = self.digits.iter().position(|&elem| elem != 0).unwrap_or(self.len() - 1); @@ -173,7 +173,7 @@ impl BigUint { pub fn lpad(&mut self, size: usize) { let n = self.len(); if n >= size { - return; + return } let pad = size - n; let mut new_digits = (0..pad).map(|_| 0).collect::>(); @@ -266,15 +266,15 @@ impl BigUint { if self.get(j) == 0 { // Note: `with_capacity` allocates with 0. Explicitly set j + m to zero if // otherwise. - continue; + continue } let mut k = 0; for i in 0..m { // PROOF: (B−1) × (B−1) + (B−1) + (B−1) = B^2 −1 < B^2. addition is safe. - let t = mul_single(self.get(j), other.get(i)) - + Double::from(w.get(i + j)) - + Double::from(k); + let t = mul_single(self.get(j), other.get(i)) + + Double::from(w.get(i + j)) + + Double::from(k); w.set(i + j, (t % B) as Single); // PROOF: (B^2 - 1) / B < B. conversion is safe. k = (t / B) as Single; @@ -318,7 +318,7 @@ impl BigUint { /// Taken from "The Art of Computer Programming" by D.E. Knuth, vol 2, chapter 4. 
pub fn div(self, other: &Self, rem: bool) -> Option<(Self, Self)> { if other.len() <= 1 || other.msb() == 0 || self.msb() == 0 || self.len() <= other.len() { - return None; + return None } let n = other.len(); let m = self.len() - n; @@ -378,7 +378,7 @@ impl BigUint { test(); while (*rhat.borrow() as Double) < B { if !test() { - break; + break } } diff --git a/primitives/arithmetic/src/fixed_point.rs b/primitives/arithmetic/src/fixed_point.rs index 5f73d64591f4e..bf3c93cdad605 100644 --- a/primitives/arithmetic/src/fixed_point.rs +++ b/primitives/arithmetic/src/fixed_point.rs @@ -144,7 +144,7 @@ pub trait FixedPointNumber: d: D, ) -> Option { if d == D::zero() { - return None; + return None } let n: I129 = n.into(); @@ -529,10 +529,10 @@ macro_rules! implement_fixed { /// `None` is returned. pub const fn try_sqrt(self) -> Option { if self.0 == 0 { - return Some(Self(0)); + return Some(Self(0)) } if self.0 < 1 { - return None; + return None } let v = self.0 as u128; @@ -623,7 +623,7 @@ macro_rules! implement_fixed { } else { let unsigned_inner = n.value as $inner_type; if unsigned_inner as u128 != n.value || (unsigned_inner > 0) != (n.value > 0) { - return None; + return None }; if n.negative { match unsigned_inner.checked_neg() { @@ -717,7 +717,7 @@ macro_rules! implement_fixed { rounding: SignedRounding, ) -> Option { if other.0 == 0 { - return None; + return None } let lhs = self.into_i129(); @@ -751,7 +751,7 @@ macro_rules! implement_fixed { fn saturating_pow(self, exp: usize) -> Self { if exp == 0 { - return Self::saturating_from_integer(1); + return Self::saturating_from_integer(1) } let exp = exp as u32; @@ -829,7 +829,7 @@ macro_rules! 
implement_fixed { impl CheckedDiv for $name { fn checked_div(&self, other: &Self) -> Option { if other.0 == 0 { - return None; + return None } let lhs: I129 = self.0.into(); diff --git a/primitives/arithmetic/src/helpers_128bit.rs b/primitives/arithmetic/src/helpers_128bit.rs index b100aa09a55aa..7938c31d1eaa6 100644 --- a/primitives/arithmetic/src/helpers_128bit.rs +++ b/primitives/arithmetic/src/helpers_128bit.rs @@ -155,7 +155,7 @@ mod double128 { pub const fn div(mut self, rhs: u128) -> (Self, u128) { if rhs == 1 { - return (self, 0); + return (self, 0) } // (self === a; rhs === b) @@ -192,7 +192,7 @@ pub const fn multiply_by_rational_with_rounding( ) -> Option { use double128::Double128; if c == 0 { - return None; + return None } let (result, remainder) = Double128::product_of(a, b).div(c); let mut result: u128 = match result.try_into_u128() { @@ -217,7 +217,7 @@ pub const fn multiply_by_rational_with_rounding( pub const fn sqrt(mut n: u128) -> u128 { // Modified from https://github.com/derekdreery/integer-sqrt-rs (Apache/MIT). if n == 0 { - return 0; + return 0 } // Compute bit, the largest power of 4 <= n diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index 50f2af2c1b175..244242c0f7580 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -68,7 +68,7 @@ where fn tcmp(&self, other: &T, threshold: T) -> Ordering { // early exit. if threshold.is_zero() { - return self.cmp(other); + return self.cmp(other) } let upper_bound = other.saturating_add(threshold); @@ -173,12 +173,12 @@ where // Nothing to do here. 
if count.is_zero() { - return Ok(Vec::::new()); + return Ok(Vec::::new()) } let diff = targeted_sum.max(sum) - targeted_sum.min(sum); if diff.is_zero() { - return Ok(input.to_vec()); + return Ok(input.to_vec()) } let needs_bump = targeted_sum > sum; diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index c24bbd300d175..baf4d95973fa0 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -92,7 +92,7 @@ pub trait PerThing: /// Return the next lower value to `self` or `self` if it is already zero. fn less_epsilon(self) -> Self { if self.is_zero() { - return self; + return self } Self::from_parts(self.deconstruct() - One::one()) } @@ -101,7 +101,7 @@ pub trait PerThing: /// zero. fn try_less_epsilon(self) -> Result { if self.is_zero() { - return Err(self); + return Err(self) } Ok(Self::from_parts(self.deconstruct() - One::one())) } @@ -109,7 +109,7 @@ pub trait PerThing: /// Return the next higher value to `self` or `self` if it is already one. fn plus_epsilon(self) -> Self { if self.is_one() { - return self; + return self } Self::from_parts(self.deconstruct() + One::one()) } @@ -118,7 +118,7 @@ pub trait PerThing: /// one. 
fn try_plus_epsilon(self) -> Result { if self.is_one() { - return Err(self); + return Err(self) } Ok(Self::from_parts(self.deconstruct() + One::one())) } @@ -464,12 +464,10 @@ impl Rounding { match (rounding, negative) { (Low, true) | (Major, _) | (High, false) => Up, (High, true) | (Minor, _) | (Low, false) => Down, - (NearestPrefMajor, _) | (NearestPrefHigh, false) | (NearestPrefLow, true) => { - NearestPrefUp - }, - (NearestPrefMinor, _) | (NearestPrefLow, false) | (NearestPrefHigh, true) => { - NearestPrefDown - }, + (NearestPrefMajor, _) | (NearestPrefHigh, false) | (NearestPrefLow, true) => + NearestPrefUp, + (NearestPrefMinor, _) | (NearestPrefLow, false) | (NearestPrefHigh, true) => + NearestPrefDown, } } } diff --git a/primitives/arithmetic/src/rational.rs b/primitives/arithmetic/src/rational.rs index 55f57f1670469..54cabfc6214e8 100644 --- a/primitives/arithmetic/src/rational.rs +++ b/primitives/arithmetic/src/rational.rs @@ -166,7 +166,7 @@ impl Rational128 { pub fn lcm(&self, other: &Self) -> Option { // this should be tested better: two large numbers that are almost the same. 
if self.1 == other.1 { - return Some(self.1); + return Some(self.1) } let g = helpers_128bit::gcd(self.1, other.1); helpers_128bit::multiply_by_rational_with_rounding( @@ -300,7 +300,7 @@ mod tests { fn mul_div(a: u128, b: u128, c: u128) -> u128 { use primitive_types::U256; if a.is_zero() { - return Zero::zero(); + return Zero::zero() } let c = c.max(1); diff --git a/primitives/authorship/src/lib.rs b/primitives/authorship/src/lib.rs index bd996db7b3d01..7ea19d9ea5ff5 100644 --- a/primitives/authorship/src/lib.rs +++ b/primitives/authorship/src/lib.rs @@ -91,7 +91,7 @@ impl sp_inherents::InherentDataProvider for InherentDataProvider mut error: &[u8], ) -> Option> { if *identifier != INHERENT_IDENTIFIER { - return None; + return None } let error = InherentError::decode(&mut error).ok()?; diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 31cc4c46d4747..dea3a7f285117 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -139,7 +139,7 @@ pub trait Backend: if let Some(max_number) = maybe_max_number { // target outside search range if target_header.number() > &max_number { - return Ok(None); + return Ok(None) } } @@ -160,12 +160,12 @@ pub trait Backend: // provided, we continue to search from all leaves below. if let Some(max_number) = maybe_max_number { if let Some(header) = self.hash(max_number)? { - return Ok(Some(header)); + return Ok(Some(header)) } } } else if info.finalized_number >= *target_header.number() { // header is on a dead fork. - return Ok(None); + return Ok(None) } self.leaves()? 
@@ -189,7 +189,7 @@ pub trait Backend: if current_header.number() <= &max_number { best_hash = current_header.hash(); - break; + break } current_hash = *current_header.parent_hash(); @@ -200,7 +200,7 @@ pub trait Backend: loop { // until we find target if current_hash == target_hash { - return Ok(Some(best_hash)); + return Ok(Some(best_hash)) } let current_header = self @@ -209,7 +209,7 @@ pub trait Backend: // stop search in this chain once we go below the target's block number if current_header.number() < target_header.number() { - break; + break } current_hash = *current_header.parent_hash(); diff --git a/primitives/blockchain/src/header_metadata.rs b/primitives/blockchain/src/header_metadata.rs index f810aba529731..87ac44459987e 100644 --- a/primitives/blockchain/src/header_metadata.rs +++ b/primitives/blockchain/src/header_metadata.rs @@ -39,12 +39,12 @@ pub fn lowest_common_ancestor + ?Sized>( ) -> Result, T::Error> { let mut header_one = backend.header_metadata(id_one)?; if header_one.parent == id_two { - return Ok(HashAndNumber { hash: id_two, number: header_one.number - One::one() }); + return Ok(HashAndNumber { hash: id_two, number: header_one.number - One::one() }) } let mut header_two = backend.header_metadata(id_two)?; if header_two.parent == id_one { - return Ok(HashAndNumber { hash: id_one, number: header_one.number }); + return Ok(HashAndNumber { hash: id_one, number: header_one.number }) } let mut orig_header_one = header_one.clone(); @@ -58,7 +58,7 @@ pub fn lowest_common_ancestor + ?Sized>( if ancestor_one.number >= header_two.number { header_one = ancestor_one; } else { - break; + break } } @@ -68,7 +68,7 @@ pub fn lowest_common_ancestor + ?Sized>( if ancestor_two.number >= header_one.number { header_two = ancestor_two; } else { - break; + break } } diff --git a/primitives/consensus/babe/src/digests.rs b/primitives/consensus/babe/src/digests.rs index 47bbb41d61fbd..1e4c820379d7a 100644 --- a/primitives/consensus/babe/src/digests.rs +++ 
b/primitives/consensus/babe/src/digests.rs @@ -121,9 +121,8 @@ impl PreDigest { pub fn vrf(&self) -> Option<(&VRFOutput, &VRFProof)> { match self { PreDigest::Primary(primary) => Some((&primary.vrf_output, &primary.vrf_proof)), - PreDigest::SecondaryVRF(secondary) => { - Some((&secondary.vrf_output, &secondary.vrf_proof)) - }, + PreDigest::SecondaryVRF(secondary) => + Some((&secondary.vrf_output, &secondary.vrf_proof)), PreDigest::SecondaryPlain(_) => None, } } diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 71b94ea005802..621ab859b914f 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -279,7 +279,7 @@ where let pre_hash = header.hash(); if !offender.verify(&pre_hash.as_ref(), &seal) { - return None; + return None } Some(()) @@ -288,7 +288,7 @@ where let verify_proof = || { // we must have different headers for the equivocation to be valid if proof.first_header.hash() == proof.second_header.hash() { - return None; + return None } let first_pre_digest = find_pre_digest(&proof.first_header)?; @@ -296,15 +296,15 @@ where // both headers must be targetting the same slot and it must // be the same as the one in the proof. 
- if proof.slot != first_pre_digest.slot() - || first_pre_digest.slot() != second_pre_digest.slot() + if proof.slot != first_pre_digest.slot() || + first_pre_digest.slot() != second_pre_digest.slot() { - return None; + return None } // both headers must have been authored by the same authority if first_pre_digest.authority_index() != second_pre_digest.authority_index() { - return None; + return None } // we finally verify that the expected authority has signed both headers and diff --git a/primitives/consensus/vrf/src/schnorrkel.rs b/primitives/consensus/vrf/src/schnorrkel.rs index ab37524e41e50..8666de6c4bc0c 100644 --- a/primitives/consensus/vrf/src/schnorrkel.rs +++ b/primitives/consensus/vrf/src/schnorrkel.rs @@ -163,33 +163,24 @@ fn convert_error(e: SignatureError) -> codec::Error { ScalarFormatError => "Signature error: `ScalarFormatError`".into(), NotMarkedSchnorrkel => "Signature error: `NotMarkedSchnorrkel`".into(), BytesLengthError { .. } => "Signature error: `BytesLengthError`".into(), - MuSigAbsent { musig_stage: Commitment } => { - "Signature error: `MuSigAbsent` at stage `Commitment`".into() - }, - MuSigAbsent { musig_stage: Reveal } => { - "Signature error: `MuSigAbsent` at stage `Reveal`".into() - }, - MuSigAbsent { musig_stage: Cosignature } => { - "Signature error: `MuSigAbsent` at stage `Commitment`".into() - }, - MuSigInconsistent { musig_stage: Commitment, duplicate: true } => { - "Signature error: `MuSigInconsistent` at stage `Commitment` on duplicate".into() - }, - MuSigInconsistent { musig_stage: Commitment, duplicate: false } => { - "Signature error: `MuSigInconsistent` at stage `Commitment` on not duplicate".into() - }, - MuSigInconsistent { musig_stage: Reveal, duplicate: true } => { - "Signature error: `MuSigInconsistent` at stage `Reveal` on duplicate".into() - }, - MuSigInconsistent { musig_stage: Reveal, duplicate: false } => { - "Signature error: `MuSigInconsistent` at stage `Reveal` on not duplicate".into() - }, - MuSigInconsistent 
{ musig_stage: Cosignature, duplicate: true } => { - "Signature error: `MuSigInconsistent` at stage `Cosignature` on duplicate".into() - }, - MuSigInconsistent { musig_stage: Cosignature, duplicate: false } => { - "Signature error: `MuSigInconsistent` at stage `Cosignature` on not duplicate".into() - }, + MuSigAbsent { musig_stage: Commitment } => + "Signature error: `MuSigAbsent` at stage `Commitment`".into(), + MuSigAbsent { musig_stage: Reveal } => + "Signature error: `MuSigAbsent` at stage `Reveal`".into(), + MuSigAbsent { musig_stage: Cosignature } => + "Signature error: `MuSigAbsent` at stage `Commitment`".into(), + MuSigInconsistent { musig_stage: Commitment, duplicate: true } => + "Signature error: `MuSigInconsistent` at stage `Commitment` on duplicate".into(), + MuSigInconsistent { musig_stage: Commitment, duplicate: false } => + "Signature error: `MuSigInconsistent` at stage `Commitment` on not duplicate".into(), + MuSigInconsistent { musig_stage: Reveal, duplicate: true } => + "Signature error: `MuSigInconsistent` at stage `Reveal` on duplicate".into(), + MuSigInconsistent { musig_stage: Reveal, duplicate: false } => + "Signature error: `MuSigInconsistent` at stage `Reveal` on not duplicate".into(), + MuSigInconsistent { musig_stage: Cosignature, duplicate: true } => + "Signature error: `MuSigInconsistent` at stage `Cosignature` on duplicate".into(), + MuSigInconsistent { musig_stage: Cosignature, duplicate: false } => + "Signature error: `MuSigInconsistent` at stage `Cosignature` on not duplicate".into(), } } diff --git a/primitives/core/hashing/proc-macro/src/impls.rs b/primitives/core/hashing/proc-macro/src/impls.rs index 42e35896aff97..3058cf019b143 100644 --- a/primitives/core/hashing/proc-macro/src/impls.rs +++ b/primitives/core/hashing/proc-macro/src/impls.rs @@ -48,22 +48,20 @@ impl Parse for InputBytes { syn::Expr::Lit(lit) => match &lit.lit { syn::Lit::Int(b) => bytes.push(b.base10_parse()?), syn::Lit::Byte(b) => bytes.push(b.value()), - _ => { 
+ _ => return Err(syn::Error::new( input.span(), "Expected array of u8 elements.".to_string(), - )) - }, + )), }, - _ => { + _ => return Err(syn::Error::new( input.span(), "Expected array of u8 elements.".to_string(), - )) - }, + )), } } - return Ok(InputBytes(bytes)); + return Ok(InputBytes(bytes)) }, Err(_e) => (), } diff --git a/primitives/core/src/bounded/bounded_btree_map.rs b/primitives/core/src/bounded/bounded_btree_map.rs index a35db28577c05..d2c148d6de9c5 100644 --- a/primitives/core/src/bounded/bounded_btree_map.rs +++ b/primitives/core/src/bounded/bounded_btree_map.rs @@ -41,7 +41,7 @@ where fn decode(input: &mut I) -> Result { let inner = BTreeMap::::decode(input)?; if inner.len() > S::get() as usize { - return Err("BoundedBTreeMap exceeds its limit".into()); + return Err("BoundedBTreeMap exceeds its limit".into()) } Ok(Self(inner, PhantomData)) } diff --git a/primitives/core/src/bounded/bounded_btree_set.rs b/primitives/core/src/bounded/bounded_btree_set.rs index fc776dc8ee056..5feac6b7150f0 100644 --- a/primitives/core/src/bounded/bounded_btree_set.rs +++ b/primitives/core/src/bounded/bounded_btree_set.rs @@ -40,7 +40,7 @@ where fn decode(input: &mut I) -> Result { let inner = BTreeSet::::decode(input)?; if inner.len() > S::get() as usize { - return Err("BoundedBTreeSet exceeds its limit".into()); + return Err("BoundedBTreeSet exceeds its limit".into()) } Ok(Self(inner, PhantomData)) } diff --git a/primitives/core/src/bounded/bounded_vec.rs b/primitives/core/src/bounded/bounded_vec.rs index 96f7792f831c5..2f39f3340ce50 100644 --- a/primitives/core/src/bounded/bounded_vec.rs +++ b/primitives/core/src/bounded/bounded_vec.rs @@ -91,7 +91,7 @@ where while let Some(value) = seq.next_element()? 
{ values.push(value); if values.len() > max { - return Err(A::Error::custom("out of bounds")); + return Err(A::Error::custom("out of bounds")) } } @@ -310,7 +310,7 @@ impl> Decode for BoundedVec { fn decode(input: &mut I) -> Result { let inner = Vec::::decode(input)?; if inner.len() > S::get() as usize { - return Err("BoundedVec exceeds its limit".into()); + return Err("BoundedVec exceeds its limit".into()) } Ok(Self(inner, PhantomData)) } @@ -494,7 +494,7 @@ impl> BoundedVec { Ok(None) } else { if index == 0 { - return Err(()); + return Err(()) } sp_std::mem::swap(&mut self[0], &mut element); // `[0..index] cannot panic since self.len() >= index. @@ -517,11 +517,11 @@ impl> BoundedVec { pub fn force_insert_keep_left(&mut self, index: usize, element: T) -> Result, ()> { // Check against panics. if Self::bound() < index || self.len() < index || Self::bound() == 0 { - return Err(()); + return Err(()) } // Noop condition. if Self::bound() == index && self.len() <= Self::bound() { - return Err(()); + return Err(()) } let maybe_removed = if self.is_full() { // defensive-only: since we are at capacity, this is a noop. @@ -549,11 +549,11 @@ impl> BoundedVec { pub fn slide(&mut self, index: usize, insert_position: usize) -> bool { // Check against panics. if self.len() <= index || self.len() < insert_position || index == usize::MAX { - return false; + return false } // Noop conditions. if index == insert_position || index + 1 == insert_position { - return false; + return false } if insert_position < index && index < self.len() { // --- --- --- === === === === @@@ --- --- --- @@ -566,7 +566,7 @@ impl> BoundedVec { // --- --- --- @@@ === === === === --- --- --- // ^N^ self[insert_position..index + 1].rotate_right(1); - return true; + return true } else if insert_position > 0 && index + 1 < insert_position { // Note that the apparent asymmetry of these two branches is due to the // fact that the "new" position is the position to be inserted *before*. 
@@ -580,7 +580,7 @@ impl> BoundedVec { // --- --- --- === === === === @@@ --- --- --- // ^N^ self[index..insert_position].rotate_left(1); - return true; + return true } debug_assert!(false, "all noop conditions should have been covered above"); diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index e5faf54cab92c..06703acea7202 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -265,7 +265,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + ByteArray { let data = s.from_base58().map_err(|_| PublicError::BadBase58)?; if data.len() < 2 { - return Err(PublicError::BadLength); + return Err(PublicError::BadLength) } let (prefix_len, ident) = match data[0] { 0..=63 => (1, data[0] as u16), @@ -282,18 +282,18 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + ByteArray { _ => return Err(PublicError::InvalidPrefix), }; if data.len() != prefix_len + body_len + CHECKSUM_LEN { - return Err(PublicError::BadLength); + return Err(PublicError::BadLength) } let format = ident.into(); if !Self::format_is_allowed(format) { - return Err(PublicError::FormatNotAllowed); + return Err(PublicError::FormatNotAllowed) } let hash = ss58hash(&data[0..body_len + prefix_len]); let checksum = &hash[0..CHECKSUM_LEN]; if data[body_len + prefix_len..body_len + prefix_len + CHECKSUM_LEN] != *checksum { // Invalid checksum. 
- return Err(PublicError::InvalidChecksum); + return Err(PublicError::InvalidChecksum) } let result = Self::from_slice(&data[prefix_len..body_len + prefix_len]) @@ -1068,7 +1068,7 @@ impl<'a> TryFrom<&'a str> for KeyTypeId { fn try_from(x: &'a str) -> Result { let b = x.as_bytes(); if b.len() != 4 { - return Err(()); + return Err(()) } let mut res = KeyTypeId::default(); res.0.copy_from_slice(&b[0..4]); @@ -1227,16 +1227,14 @@ mod tests { password, path: path.into_iter().chain(path_iter).collect(), }, - TestPair::GeneratedFromPhrase { phrase, password } => { - TestPair::Standard { phrase, password, path: path_iter.collect() } - }, - x => { + TestPair::GeneratedFromPhrase { phrase, password } => + TestPair::Standard { phrase, password, path: path_iter.collect() }, + x => if path_iter.count() == 0 { x } else { - return Err(()); - } - }, + return Err(()) + }, }, None, )) diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index e3bd99116cbd1..ca6b800625bc2 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -142,7 +142,7 @@ impl TryFrom<&[u8]> for Public { fn try_from(data: &[u8]) -> Result { if data.len() != Self::LEN { - return Err(()); + return Err(()) } let mut r = [0u8; Self::LEN]; r.copy_from_slice(data); @@ -317,7 +317,7 @@ impl Signature { /// you are certain that the array actually is a signature. GIGO! 
pub fn from_slice(data: &[u8]) -> Option { if data.len() != 65 { - return None; + return None } let mut r = [0u8; 65]; r.copy_from_slice(data); diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index 2f5c2212eed36..e85eb87c9fd83 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -114,7 +114,7 @@ impl TryFrom<&[u8]> for Public { fn try_from(data: &[u8]) -> Result { if data.len() != Self::LEN { - return Err(()); + return Err(()) } let mut r = [0u8; Self::LEN]; r.copy_from_slice(data); @@ -316,7 +316,7 @@ impl Signature { /// you are certain that the array actually is a signature. GIGO! pub fn from_slice(data: &[u8]) -> Option { if data.len() != 64 { - return None; + return None } let mut r = [0u8; 64]; r.copy_from_slice(data); diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 77dab804c10e5..fda7604d5337f 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -116,11 +116,10 @@ impl ExecutionContext { Importing | Syncing | BlockConstruction => offchain::Capabilities::empty(), // Enable keystore, transaction pool and Offchain DB reads by default for offchain // calls. 
- OffchainCall(None) => { - offchain::Capabilities::KEYSTORE - | offchain::Capabilities::OFFCHAIN_DB_READ - | offchain::Capabilities::TRANSACTION_POOL - }, + OffchainCall(None) => + offchain::Capabilities::KEYSTORE | + offchain::Capabilities::OFFCHAIN_DB_READ | + offchain::Capabilities::TRANSACTION_POOL, OffchainCall(Some((_, capabilities))) => *capabilities, } } diff --git a/primitives/core/src/offchain/storage.rs b/primitives/core/src/offchain/storage.rs index 6e6238c651c7b..cf2c93641f245 100644 --- a/primitives/core/src/offchain/storage.rs +++ b/primitives/core/src/offchain/storage.rs @@ -73,14 +73,13 @@ impl OffchainStorage for InMemOffchainStorage { let key = prefix.iter().chain(key).cloned().collect(); match self.storage.entry(key) { - Entry::Vacant(entry) => { + Entry::Vacant(entry) => if old_value.is_none() { entry.insert(new_value.to_vec()); true } else { false - } - }, + }, Entry::Occupied(ref mut entry) if Some(entry.get().as_slice()) == old_value => { entry.insert(new_value.to_vec()); true diff --git a/primitives/core/src/offchain/testing.rs b/primitives/core/src/offchain/testing.rs index 0e2d57ba2f360..a2065eb17717f 100644 --- a/primitives/core/src/offchain/testing.rs +++ b/primitives/core/src/offchain/testing.rs @@ -80,9 +80,8 @@ impl TestPersistentOffchainDB { let mut me = self.persistent.write(); for ((_prefix, key), value_operation) in changes { match value_operation { - OffchainOverlayedChange::SetValue(val) => { - me.set(Self::PREFIX, key.as_slice(), val.as_slice()) - }, + OffchainOverlayedChange::SetValue(val) => + me.set(Self::PREFIX, key.as_slice(), val.as_slice()), OffchainOverlayedChange::Remove => me.remove(Self::PREFIX, key.as_slice()), } } @@ -382,12 +381,10 @@ impl offchain::DbExternalities for TestOffchainExt { ) -> bool { let mut state = self.0.write(); match kind { - StorageKind::LOCAL => { - state.local_storage.compare_and_set(b"", key, old_value, new_value) - }, - StorageKind::PERSISTENT => { - 
state.persistent_storage.compare_and_set(b"", key, old_value, new_value) - }, + StorageKind::LOCAL => + state.local_storage.compare_and_set(b"", key, old_value, new_value), + StorageKind::PERSISTENT => + state.persistent_storage.compare_and_set(b"", key, old_value, new_value), } } diff --git a/primitives/core/src/sr25519.rs b/primitives/core/src/sr25519.rs index 93c33d1320fe9..9064fb7427393 100644 --- a/primitives/core/src/sr25519.rs +++ b/primitives/core/src/sr25519.rs @@ -145,7 +145,7 @@ impl TryFrom<&[u8]> for Public { fn try_from(data: &[u8]) -> Result { if data.len() != Self::LEN { - return Err(()); + return Err(()) } let mut r = [0u8; 32]; r.copy_from_slice(data); @@ -341,7 +341,7 @@ impl Signature { /// you are certain that the array actually is a signature. GIGO! pub fn from_slice(data: &[u8]) -> Option { if data.len() != 64 { - return None; + return None } let mut r = [0u8; 64]; r.copy_from_slice(data); diff --git a/primitives/database/src/kvdb.rs b/primitives/database/src/kvdb.rs index dd6798c2359e7..5fe5fda307a1e 100644 --- a/primitives/database/src/kvdb.rs +++ b/primitives/database/src/kvdb.rs @@ -53,7 +53,7 @@ impl DbAdapter { return Err(error::DatabaseError(Box::new(std::io::Error::new( std::io::ErrorKind::Other, format!("Unexpected counter len {}", data.len()), - )))); + )))) } counter_data.copy_from_slice(&data); let counter = u32::from_le_bytes(counter_data); diff --git a/primitives/debug-derive/src/impls.rs b/primitives/debug-derive/src/impls.rs index 72dab228450f9..51a4d876c79b6 100644 --- a/primitives/debug-derive/src/impls.rs +++ b/primitives/debug-derive/src/impls.rs @@ -179,9 +179,8 @@ mod implementation { name_str, Fields::new(f.named.iter(), Some(syn::Token!(self)(Span::call_site()))), ), - syn::Fields::Unnamed(ref f) => { - derive_fields(name_str, Fields::new(f.unnamed.iter(), None)) - }, + syn::Fields::Unnamed(ref f) => + derive_fields(name_str, Fields::new(f.unnamed.iter(), None)), syn::Fields::Unit => derive_fields(name_str, 
Fields::Indexed { indices: vec![] }), } } diff --git a/primitives/finality-grandpa/src/lib.rs b/primitives/finality-grandpa/src/lib.rs index f19c3a0125645..f92fee4f12963 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -332,10 +332,10 @@ where macro_rules! check { ( $equivocation:expr, $message:expr ) => { // if both votes have the same target the equivocation is invalid. - if $equivocation.first.0.target_hash == $equivocation.second.0.target_hash - && $equivocation.first.0.target_number == $equivocation.second.0.target_number + if $equivocation.first.0.target_hash == $equivocation.second.0.target_hash && + $equivocation.first.0.target_number == $equivocation.second.0.target_number { - return false; + return false } // check signatures on both votes are valid @@ -514,7 +514,7 @@ impl<'a> Decode for VersionedAuthorityList<'a> { fn decode(value: &mut I) -> Result { let (version, authorities): (u8, AuthorityList) = Decode::decode(value)?; if version != AUTHORITIES_VERSION { - return Err("unknown Grandpa authorities version".into()); + return Err("unknown Grandpa authorities version".into()) } Ok(authorities.into()) } diff --git a/primitives/inherents/src/lib.rs b/primitives/inherents/src/lib.rs index 6dfeb70200743..a3ef963c47b39 100644 --- a/primitives/inherents/src/lib.rs +++ b/primitives/inherents/src/lib.rs @@ -313,7 +313,7 @@ impl CheckInherentsResult { ) -> Result<(), Error> { // Don't accept any other error if self.fatal_error { - return Err(Error::FatalErrorReported); + return Err(Error::FatalErrorReported) } if error.is_fatal_error() { @@ -361,9 +361,9 @@ impl CheckInherentsResult { #[cfg(feature = "std")] impl PartialEq for CheckInherentsResult { fn eq(&self, other: &Self) -> bool { - self.fatal_error == other.fatal_error - && self.okay == other.okay - && self.errors.data == other.errors.data + self.fatal_error == other.fatal_error && + self.okay == other.okay && + self.errors.data == other.errors.data } } 
diff --git a/primitives/io/src/batch_verifier.rs b/primitives/io/src/batch_verifier.rs index 430235acbb2da..501d986758bf2 100644 --- a/primitives/io/src/batch_verifier.rs +++ b/primitives/io/src/batch_verifier.rs @@ -65,7 +65,7 @@ impl BatchVerifier { ) -> bool { // there is already invalid transaction encountered if self.invalid.load(AtomicOrdering::Relaxed) { - return false; + return false } let invalid_clone = self.invalid.clone(); @@ -118,7 +118,7 @@ impl BatchVerifier { message: Vec, ) -> bool { if self.invalid.load(AtomicOrdering::Relaxed) { - return false; + return false } self.sr25519_items.push(Sr25519BatchItem { signature, pub_key, message }); @@ -172,7 +172,7 @@ impl BatchVerifier { ); if !Self::verify_sr25519_batch(std::mem::take(&mut self.sr25519_items)) { - return false; + return false } if pending.len() > 0 { @@ -196,7 +196,7 @@ impl BatchVerifier { "Haven't received async result from verification task. Returning false.", ); - return false; + return false } } diff --git a/primitives/keystore/src/lib.rs b/primitives/keystore/src/lib.rs index 8e6ba8a4aa9b1..6540e71bc3fe0 100644 --- a/primitives/keystore/src/lib.rs +++ b/primitives/keystore/src/lib.rs @@ -140,11 +140,11 @@ pub trait CryptoStore: Send + Sync { msg: &[u8], ) -> Result)>, Error> { if keys.len() == 1 { - return Ok(self.sign_with(id, &keys[0], msg).await?.map(|s| (keys[0].clone(), s))); + return Ok(self.sign_with(id, &keys[0], msg).await?.map(|s| (keys[0].clone(), s))) } else { for k in self.supported_keys(id, keys).await? { if let Ok(Some(sign)) = self.sign_with(id, &k, msg).await { - return Ok(Some((k, sign))); + return Ok(Some((k, sign))) } } } @@ -317,11 +317,11 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { if keys.len() == 1 { return Ok( SyncCryptoStore::sign_with(self, id, &keys[0], msg)?.map(|s| (keys[0].clone(), s)) - ); + ) } else { for k in SyncCryptoStore::supported_keys(self, id, keys)? 
{ if let Ok(Some(sign)) = SyncCryptoStore::sign_with(self, id, &k, msg) { - return Ok(Some((k, sign))); + return Ok(Some((k, sign))) } } } diff --git a/primitives/keystore/src/testing.rs b/primitives/keystore/src/testing.rs index b74236acbb236..c111fffb7246c 100644 --- a/primitives/keystore/src/testing.rs +++ b/primitives/keystore/src/testing.rs @@ -371,7 +371,7 @@ impl SyncCryptoStore for KeyStore { let pair = if let Some(k) = self.sr25519_key_pair(key_type, public) { k } else { - return Ok(None); + return Ok(None) }; let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); diff --git a/primitives/maybe-compressed-blob/src/lib.rs b/primitives/maybe-compressed-blob/src/lib.rs index 6fd29b966c950..99c12ed39bc04 100644 --- a/primitives/maybe-compressed-blob/src/lib.rs +++ b/primitives/maybe-compressed-blob/src/lib.rs @@ -85,7 +85,7 @@ pub fn decompress(blob: &[u8], bomb_limit: usize) -> Result, Error> { /// able to differentiate it from a compression bomb. pub fn compress(blob: &[u8], bomb_limit: usize) -> Option> { if blob.len() > bomb_limit { - return None; + return None } let mut buf = ZSTD_PREFIX.to_vec(); diff --git a/primitives/npos-elections/fuzzer/src/common.rs b/primitives/npos-elections/fuzzer/src/common.rs index 8f72928f49b04..ad9bd43f9bce0 100644 --- a/primitives/npos-elections/fuzzer/src/common.rs +++ b/primitives/npos-elections/fuzzer/src/common.rs @@ -158,12 +158,10 @@ pub fn generate_random_npos_result( ( match election_type { - ElectionType::Phragmen(conf) => { - seq_phragmen(to_elect, candidates.clone(), voters.clone(), conf).unwrap() - }, - ElectionType::Phragmms(conf) => { - phragmms(to_elect, candidates.clone(), voters.clone(), conf).unwrap() - }, + ElectionType::Phragmen(conf) => + seq_phragmen(to_elect, candidates.clone(), voters.clone(), conf).unwrap(), + ElectionType::Phragmms(conf) => + phragmms(to_elect, candidates.clone(), voters.clone(), conf).unwrap(), }, candidates, voters, diff --git 
a/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs b/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs index c4544557f09e4..e053f9aa0cddd 100644 --- a/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs +++ b/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs @@ -60,7 +60,7 @@ fn main() { if score.minimal_stake == 0 { // such cases cannot be improved by balancing. - return; + return } score }; @@ -88,9 +88,9 @@ fn main() { // The only guarantee of balancing is such that the first and third element of the // score cannot decrease. assert!( - balanced_score.minimal_stake >= unbalanced_score.minimal_stake - && balanced_score.sum_stake == unbalanced_score.sum_stake - && balanced_score.sum_stake_squared <= unbalanced_score.sum_stake_squared + balanced_score.minimal_stake >= unbalanced_score.minimal_stake && + balanced_score.sum_stake == unbalanced_score.sum_stake && + balanced_score.sum_stake_squared <= unbalanced_score.sum_stake_squared ); } }); diff --git a/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs b/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs index 6af4f6492ded3..3f114674e29d9 100644 --- a/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs +++ b/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs @@ -60,7 +60,7 @@ fn main() { if score.minimal_stake == 0 { // such cases cannot be improved by balancing. - return; + return } score }; @@ -85,9 +85,9 @@ fn main() { // The only guarantee of balancing is such that the first and third element of the score // cannot decrease. 
assert!( - balanced_score.minimal_stake >= unbalanced_score.minimal_stake - && balanced_score.sum_stake == unbalanced_score.sum_stake - && balanced_score.sum_stake_squared <= unbalanced_score.sum_stake_squared + balanced_score.minimal_stake >= unbalanced_score.minimal_stake && + balanced_score.sum_stake == unbalanced_score.sum_stake && + balanced_score.sum_stake_squared <= unbalanced_score.sum_stake_squared ); }); } diff --git a/primitives/npos-elections/src/balancing.rs b/primitives/npos-elections/src/balancing.rs index a71e5b08bf536..4a713658ad38f 100644 --- a/primitives/npos-elections/src/balancing.rs +++ b/primitives/npos-elections/src/balancing.rs @@ -60,7 +60,7 @@ pub fn balance( config: &BalancingConfig, ) -> usize { if config.iterations == 0 { - return 0; + return 0 } let mut iter = 0; @@ -75,7 +75,7 @@ pub fn balance( iter += 1; if max_diff <= config.tolerance || iter >= config.iterations { - break iter; + break iter } } } @@ -94,7 +94,7 @@ pub(crate) fn balance_voter( // Either empty, or a self vote. Not much to do in either case. if elected_edges.len() <= 1 { - return Zero::zero(); + return Zero::zero() } // amount of stake from this voter that is used in edges. @@ -125,7 +125,7 @@ pub(crate) fn balance_voter( let mut difference = max_stake.saturating_sub(*min_stake); difference = difference.saturating_add(voter.budget.saturating_sub(stake_used)); if difference < tolerance { - return difference; + return difference } difference } else { @@ -151,7 +151,7 @@ pub(crate) fn balance_voter( if temp.saturating_sub(cumulative_backed_stake) > voter.budget { // defensive only. length of elected_edges is checked to be above 1. 
last_index = index.saturating_sub(1) as usize; - break; + break } cumulative_backed_stake = cumulative_backed_stake.saturating_add(backed_stake); } diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index 15896775bc566..d0c9ed18caddc 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -553,7 +553,7 @@ pub fn setup_inputs( for v in votes { if edges.iter().any(|e| e.who == v) { // duplicate edge. - continue; + continue } if let Some(idx) = c_idx_cache.get(&v) { // This candidate is valid + already cached. diff --git a/primitives/npos-elections/src/mock.rs b/primitives/npos-elections/src/mock.rs index 1a069bb5d5e98..5a06e3f3c88ca 100644 --- a/primitives/npos-elections/src/mock.rs +++ b/primitives/npos-elections/src/mock.rs @@ -145,7 +145,7 @@ where elected_candidates.push((winner.who, winner.approval_stake as ExtendedBalance)); } else { - break; + break } } @@ -190,7 +190,7 @@ pub(crate) fn equalize_float( } if max_diff < tolerance { - break; + break } } } @@ -207,7 +207,7 @@ where { let budget = budget_balance as f64; if elected_edges.is_empty() { - return 0.0; + return 0.0 } let stake_used = elected_edges.iter().fold(0.0, |s, e| s + e.1); @@ -235,7 +235,7 @@ where difference = max_stake - min_stake; difference = difference + budget - stake_used; if difference < tolerance { - return difference; + return difference } } else { difference = budget; @@ -266,7 +266,7 @@ where let stake_sub = stake_mul - cumulative_stake; if stake_sub > budget { last_index = idx.checked_sub(1).unwrap_or(0); - return; + return } cumulative_stake = cumulative_stake + stake; } diff --git a/primitives/npos-elections/src/node.rs b/primitives/npos-elections/src/node.rs index dbba156803450..6642a9ae39736 100644 --- a/primitives/npos-elections/src/node.rs +++ b/primitives/npos-elections/src/node.rs @@ -93,7 +93,7 @@ impl Node { /// Returns true if `other` is the parent of `who`. 
pub fn is_parent_of(who: &NodeRef, other: &NodeRef) -> bool { if who.borrow().parent.is_none() { - return false; + return false } who.borrow().parent.as_ref() == Some(other) } @@ -127,7 +127,7 @@ impl Node { while let Some(ref next_parent) = current.clone().borrow().parent { if visited.contains(next_parent) { - break; + break } parent_path.push(next_parent.clone()); current = next_parent.clone(); diff --git a/primitives/npos-elections/src/phragmen.rs b/primitives/npos-elections/src/phragmen.rs index f69d436e20625..ca32780ed84b4 100644 --- a/primitives/npos-elections/src/phragmen.rs +++ b/primitives/npos-elections/src/phragmen.rs @@ -176,7 +176,7 @@ pub fn seq_phragmen_core( } } } else { - break; + break } } diff --git a/primitives/npos-elections/src/phragmms.rs b/primitives/npos-elections/src/phragmms.rs index 7becbb2b0543c..3fbbad75e2f8f 100644 --- a/primitives/npos-elections/src/phragmms.rs +++ b/primitives/npos-elections/src/phragmms.rs @@ -62,7 +62,7 @@ pub fn phragmms( balance(&mut voters, config); } } else { - break; + break } } diff --git a/primitives/npos-elections/src/pjr.rs b/primitives/npos-elections/src/pjr.rs index c982945926496..fd7c8ef539241 100644 --- a/primitives/npos-elections/src/pjr.rs +++ b/primitives/npos-elections/src/pjr.rs @@ -51,8 +51,8 @@ pub fn standard_threshold( ) -> Threshold { weights .into_iter() - .fold(Threshold::zero(), |acc, elem| acc.saturating_add(elem)) - / committee_size.max(1) as Threshold + .fold(Threshold::zero(), |acc, elem| acc.saturating_add(elem)) / + committee_size.max(1) as Threshold } /// Check a solution to be PJR. @@ -307,7 +307,7 @@ fn prepare_pjr_input( for t in ts { if edges.iter().any(|e| e.who == t) { // duplicate edge. 
- continue; + continue } if let Some(idx) = candidates_index.get(&t) { diff --git a/primitives/npos-elections/src/reduce.rs b/primitives/npos-elections/src/reduce.rs index f8ec95ca12e1d..c802a29504709 100644 --- a/primitives/npos-elections/src/reduce.rs +++ b/primitives/npos-elections/src/reduce.rs @@ -64,7 +64,7 @@ type Map = BTreeMap<(A, A), A>; fn combinations_2(input: &[T]) -> Vec<(T, T)> { let n = input.len(); if n < 2 { - return Default::default(); + return Default::default() } let mut comb = Vec::with_capacity(n * (n - 1) / 2); @@ -142,13 +142,13 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { .filter(|(t, _)| *t == v1 || *t == v2) .count() != 2 { - continue; + continue } // check if other_who voted for the same pair v1, v2. let maybe_other_assignments = assignments.iter().find(|a| a.who == *other_who); if maybe_other_assignments.is_none() { - continue; + continue } let other_assignment = maybe_other_assignments.expect("value is checked to be 'Some'"); @@ -177,7 +177,7 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { if other_votes_count < 2 { // This is not a cycle. Replace and continue. *other_who = who.clone(); - continue; + continue } else if other_votes_count == 2 { // This is a cycle. let mut who_cycle_votes: Vec<(A, ExtendedBalance)> = Vec::with_capacity(2); @@ -188,7 +188,7 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { }); if who_cycle_votes.len() != 2 { - continue; + continue } // Align the targets similarly. This helps with the circulation below. @@ -334,7 +334,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 let maybe_dist = assignments[assignment_index].distribution.get(dist_index); if maybe_dist.is_none() { // The rest of this loop is moot. 
- break; + break } let (target, _) = maybe_dist.expect("Value checked to be some").clone(); @@ -361,17 +361,17 @@ fn reduce_all(assignments: &mut Vec>) -> u32 (false, false) => { Node::set_parent_of(&target_node, &voter_node); dist_index += 1; - continue; + continue }, (false, true) => { Node::set_parent_of(&voter_node, &target_node); dist_index += 1; - continue; + continue }, (true, false) => { Node::set_parent_of(&target_node, &voter_node); dist_index += 1; - continue; + continue }, (true, true) => { /* don't continue and execute the rest */ }, }; @@ -492,7 +492,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 ) => (min_value, min_target, min_voter, min_index, min_direction), _ => { sp_runtime::print("UNREACHABLE code reached in `reduce` algorithm. This must be a bug."); - break; + break }, }; @@ -608,7 +608,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 let current = voter_root_path[i].clone().borrow().id.who.clone(); let next = voter_root_path[i + 1].clone().borrow().id.who.clone(); if min_edge.contains(¤t) && min_edge.contains(&next) { - break; + break } Node::set_parent_of(&voter_root_path[i + 1], &voter_root_path[i]); } @@ -619,7 +619,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 let current = target_root_path[i].clone().borrow().id.who.clone(); let next = target_root_path[i + 1].clone().borrow().id.who.clone(); if min_edge.contains(¤t) && min_edge.contains(&next) { - break; + break } Node::set_parent_of(&target_root_path[i + 1], &target_root_path[i]); } diff --git a/primitives/runtime-interface/proc-macro/src/lib.rs b/primitives/runtime-interface/proc-macro/src/lib.rs index 0be5b6f77fe68..afba38993fe76 100644 --- a/primitives/runtime-interface/proc-macro/src/lib.rs +++ b/primitives/runtime-interface/proc-macro/src/lib.rs @@ -66,7 +66,7 @@ impl Parse for Options { } else if lookahead.peek(Token![,]) { let _ = input.parse::(); } else { - return Err(lookahead.error()); + return Err(lookahead.error()) } } Ok(res) diff --git 
a/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs b/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs index 4748f3bc5619e..e25295fdca5cb 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs @@ -81,7 +81,7 @@ pub fn derive_impl(input: DeriveInput) -> Result { /// enum or a variant is not an unit. fn get_enum_field_idents(data: &Data) -> Result>> { match data { - Data::Enum(d) => { + Data::Enum(d) => if d.variants.len() <= 256 { Ok(d.variants.iter().map(|v| { if let Fields::Unit = v.fields { @@ -95,8 +95,7 @@ fn get_enum_field_idents(data: &Data) -> Result Err(Error::new(Span::call_site(), "`PassByEnum` only supports enums as input type.")), } } diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs b/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs index 1632286c3938c..7a527af129467 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs @@ -93,11 +93,11 @@ fn extract_inner_ty_and_name(data: &Data) -> Result<(Type, Option)> { match struct_data.fields { Fields::Named(ref named) if named.named.len() == 1 => { let field = &named.named[0]; - return Ok((field.ty.clone(), field.ident.clone())); + return Ok((field.ty.clone(), field.ident.clone())) }, Fields::Unnamed(ref unnamed) if unnamed.unnamed.len() == 1 => { let field = &unnamed.unnamed[0]; - return Ok((field.ty.clone(), field.ident.clone())); + return Ok((field.ty.clone(), field.ident.clone())) }, _ => {}, } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs index 2b746c0b54798..03da0bed59815 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs +++ 
b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs @@ -428,15 +428,14 @@ fn generate_wasm_interface_signature_for_host_function(sig: &Signature) -> Resul /// Generate the variable name that stores the FFI value. fn generate_ffi_value_var_name(pat: &Pat) -> Result { match pat { - Pat::Ident(pat_ident) => { + Pat::Ident(pat_ident) => if let Some(by_ref) = pat_ident.by_ref { Err(Error::new(by_ref.span(), "`ref` not supported!")) } else if let Some(sub_pattern) = &pat_ident.subpat { Err(Error::new(sub_pattern.0.span(), "Not supported!")) } else { Ok(Ident::new(&format!("{}_ffi_value", pat_ident.ident), Span::call_site())) - } - }, + }, _ => Err(Error::new(pat.span(), "Not supported as variable name!")), } } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs index 8e0ec7bd4b036..0ae0f5260286c 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs @@ -130,7 +130,7 @@ fn declare_essential_trait(trait_def: &ItemTrait) -> Result { let trait_ = &trait_def.ident; if let Some(param) = trait_def.generics.params.first() { - return Err(Error::new(param.span(), "Generic parameters not supported.")); + return Err(Error::new(param.span(), "Generic parameters not supported.")) } let interface = get_runtime_interface(trait_def)?; diff --git a/primitives/runtime-interface/proc-macro/src/utils.rs b/primitives/runtime-interface/proc-macro/src/utils.rs index 18937ac05d2ca..386eef153f45c 100644 --- a/primitives/runtime-interface/proc-macro/src/utils.rs +++ b/primitives/runtime-interface/proc-macro/src/utils.rs @@ -69,7 +69,7 @@ impl RuntimeInterfaceFunction { return Err(Error::new( item.sig.ident.span(), "Methods marked as #[trap_on_return] cannot return anything", - )); + )) } Ok(Self { item, 
should_trap_on_return }) @@ -127,13 +127,13 @@ impl RuntimeInterfaceFunctionSet { "Previous version with the same number defined here", )); - return Err(err); + return Err(err) } self.versions .insert(version.version, RuntimeInterfaceFunction::new(trait_item)?); - if self.latest_version_to_call.map_or(true, |v| v < version.version) - && version.is_callable() + if self.latest_version_to_call.map_or(true, |v| v < version.version) && + version.is_callable() { self.latest_version_to_call = Some(version.version); } @@ -315,7 +315,7 @@ impl Parse for VersionAttribute { Some(input.parse()?) } else { if !input.is_empty() { - return Err(Error::new(input.span(), "Unexpected token, expected `,`.")); + return Err(Error::new(input.span(), "Unexpected token, expected `,`.")) } None @@ -343,7 +343,7 @@ pub fn get_runtime_interface(trait_def: &ItemTrait) -> Result let version = get_item_version(item)?.unwrap_or_default(); if version.version < 1 { - return Err(Error::new(item.span(), "Version needs to be at least `1`.")); + return Err(Error::new(item.span(), "Version needs to be at least `1`.")) } match functions.entry(name.clone()) { @@ -366,7 +366,7 @@ pub fn get_runtime_interface(trait_def: &ItemTrait) -> Result "Unexpected version attribute: missing version '{}' for this function", next_expected ), - )); + )) } next_expected += 1; } diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index 417dffb54d57b..e801931c306cf 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -196,7 +196,7 @@ impl FromFFIValue for Vec { let len = len as usize; if len == 0 { - return Vec::new(); + return Vec::new() } let data = unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) }; diff --git a/primitives/runtime-interface/tests/ui.rs b/primitives/runtime-interface/tests/ui.rs index 64de3311f5214..f3d6aa59a0336 100644 --- a/primitives/runtime-interface/tests/ui.rs +++ 
b/primitives/runtime-interface/tests/ui.rs @@ -22,7 +22,7 @@ use std::env; fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if env::var("RUN_UI_TESTS").is_err() { - return; + return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index b35cc41149d36..c040b7cf517e0 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -75,7 +75,7 @@ impl<'a> PiecewiseLinear<'a> { let n = n.min(d.clone()); if self.points.is_empty() { - return N::zero(); + return N::zero() } let next_point_index = self.points.iter().position(|p| n < p.0 * d.clone()); @@ -85,11 +85,11 @@ impl<'a> PiecewiseLinear<'a> { (self.points[previous_point_index], self.points[next_point_index]) } else { // There is no previous points, take first point ordinate - return self.points.first().map(|p| p.1).unwrap_or_else(Perbill::zero) * d; + return self.points.first().map(|p| p.1).unwrap_or_else(Perbill::zero) * d } } else { // There is no next points, take last point ordinate - return self.points.last().map(|p| p.1).unwrap_or_else(Perbill::zero) * d; + return self.points.last().map(|p| p.1).unwrap_or_else(Perbill::zero) * d }; let delta_y = multiply_by_rational_saturating( diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index d431af2d912a4..ec74ebb0d4e15 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -354,13 +354,11 @@ impl<'a> DigestItemRef<'a> { /// return the opaque data it contains. 
pub fn try_as_raw(&self, id: OpaqueDigestItemId) -> Option<&'a [u8]> { match (id, self) { - (OpaqueDigestItemId::Consensus(w), &Self::Consensus(v, s)) - | (OpaqueDigestItemId::Seal(w), &Self::Seal(v, s)) - | (OpaqueDigestItemId::PreRuntime(w), &Self::PreRuntime(v, s)) + (OpaqueDigestItemId::Consensus(w), &Self::Consensus(v, s)) | + (OpaqueDigestItemId::Seal(w), &Self::Seal(v, s)) | + (OpaqueDigestItemId::PreRuntime(w), &Self::PreRuntime(v, s)) if v == w => - { - Some(s) - }, + Some(s), (OpaqueDigestItemId::Other, &Self::Other(s)) => Some(s), _ => None, } @@ -466,18 +464,14 @@ mod tests { let check = |digest_item_type: DigestItemType| { let (variant_name, digest_item) = match digest_item_type { DigestItemType::Other => ("Other", DigestItem::Other(Default::default())), - DigestItemType::Consensus => { - ("Consensus", DigestItem::Consensus(Default::default(), Default::default())) - }, - DigestItemType::Seal => { - ("Seal", DigestItem::Seal(Default::default(), Default::default())) - }, - DigestItemType::PreRuntime => { - ("PreRuntime", DigestItem::PreRuntime(Default::default(), Default::default())) - }, - DigestItemType::RuntimeEnvironmentUpdated => { - ("RuntimeEnvironmentUpdated", DigestItem::RuntimeEnvironmentUpdated) - }, + DigestItemType::Consensus => + ("Consensus", DigestItem::Consensus(Default::default(), Default::default())), + DigestItemType::Seal => + ("Seal", DigestItem::Seal(Default::default(), Default::default())), + DigestItemType::PreRuntime => + ("PreRuntime", DigestItem::PreRuntime(Default::default(), Default::default())), + DigestItemType::RuntimeEnvironmentUpdated => + ("RuntimeEnvironmentUpdated", DigestItem::RuntimeEnvironmentUpdated), }; let encoded = digest_item.encode(); let variant = variants diff --git a/primitives/runtime/src/generic/era.rs b/primitives/runtime/src/generic/era.rs index 6730c97b6fac3..b26545fb8404e 100644 --- a/primitives/runtime/src/generic/era.rs +++ b/primitives/runtime/src/generic/era.rs @@ -105,8 +105,8 @@ impl Encode 
for Era { Self::Immortal => output.push_byte(0), Self::Mortal(period, phase) => { let quantize_factor = (*period as u64 >> 12).max(1); - let encoded = (period.trailing_zeros() - 1).clamp(1, 15) as u16 - | ((phase / quantize_factor) << 4) as u16; + let encoded = (period.trailing_zeros() - 1).clamp(1, 15) as u16 | + ((phase / quantize_factor) << 4) as u16; encoded.encode_to(output); }, } diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index fbec1b2223e14..a7b43608f2b78 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -62,11 +62,11 @@ where Hash::Output: parity_util_mem::MallocSizeOf, { fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { - self.parent_hash.size_of(ops) - + self.number.size_of(ops) - + self.state_root.size_of(ops) - + self.extrinsics_root.size_of(ops) - + self.digest.size_of(ops) + self.parent_hash.size_of(ops) + + self.number.size_of(ops) + + self.state_root.size_of(ops) + + self.extrinsics_root.size_of(ops) + + self.digest.size_of(ops) } } diff --git a/primitives/runtime/src/generic/unchecked_extrinsic.rs b/primitives/runtime/src/generic/unchecked_extrinsic.rs index 98d4eed7b8f7c..fb333abd6ac6e 100644 --- a/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -152,7 +152,7 @@ where let signed = lookup.lookup(signed)?; let raw_payload = SignedPayload::new(self.function, extra)?; if !raw_payload.using_encoded(|payload| signature.verify(payload, &signed)) { - return Err(InvalidTransaction::BadProof.into()); + return Err(InvalidTransaction::BadProof.into()) } let (function, extra, _) = raw_payload.deconstruct(); @@ -249,7 +249,7 @@ where let is_signed = version & 0b1000_0000 != 0; let version = version & 0b0111_1111; if version != EXTRINSIC_FORMAT_VERSION { - return Err("Invalid transaction version".into()); + return Err("Invalid transaction version".into()) } let 
signature = is_signed.then(|| Decode::decode(input)).transpose()?; @@ -261,7 +261,7 @@ where let length = before_length.saturating_sub(after_length); if length != expected_length.0 as usize { - return Err("Invalid length prefix".into()); + return Err("Invalid length prefix".into()) } } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 6b748854d57c8..3752e31cbeeb0 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -137,7 +137,7 @@ impl Justifications { /// not inserted. pub fn append(&mut self, justification: Justification) -> bool { if self.get(justification.0).is_some() { - return false; + return false } self.0.push(justification); true @@ -214,7 +214,7 @@ impl BuildStorage for sp_core::storage::Storage { if let Some(map) = storage.children_default.get_mut(&k) { map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); if !map.child_info.try_update(&other_map.child_info) { - return Err("Incompatible child info update".to_string()); + return Err("Incompatible child info update".to_string()) } } else { storage.children_default.insert(k, other_map.clone()); @@ -415,10 +415,9 @@ impl Verify for MultiSignature { (Self::Ecdsa(ref sig), who) => { let m = sp_io::hashing::blake2_256(msg.get()); match sp_io::crypto::secp256k1_ecdsa_recover_compressed(sig.as_ref(), &m) { - Ok(pubkey) => { - &sp_io::hashing::blake2_256(pubkey.as_ref()) - == >::as_ref(who) - }, + Ok(pubkey) => + &sp_io::hashing::blake2_256(pubkey.as_ref()) == + >::as_ref(who), _ => false, } }, @@ -437,8 +436,8 @@ impl Verify for AnySignature { let msg = msg.get(); sr25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) .map(|s| s.verify(msg, signer)) - .unwrap_or(false) - || ed25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) + .unwrap_or(false) || + ed25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) .map(|s| match ed25519::Public::from_slice(signer.as_ref()) { Err(()) => false, Ok(signer) => 
s.verify(msg, &signer), @@ -573,9 +572,8 @@ impl DispatchError { /// Return the same error but without the attached message. pub fn stripped(self) -> Self { match self { - DispatchError::Module(ModuleError { index, error, message: Some(_) }) => { - DispatchError::Module(ModuleError { index, error, message: None }) - }, + DispatchError::Module(ModuleError { index, error, message: Some(_) }) => + DispatchError::Module(ModuleError { index, error, message: None }), m => m, } } @@ -838,8 +836,8 @@ pub fn verify_encoded_lazy( macro_rules! assert_eq_error_rate { ($x:expr, $y:expr, $error:expr $(,)?) => { assert!( - ($x >= $crate::Saturating::saturating_sub($y, $error)) - && ($x <= $crate::Saturating::saturating_add($y, $error)), + ($x >= $crate::Saturating::saturating_sub($y, $error)) && + ($x <= $crate::Saturating::saturating_add($y, $error)), "{:?} != {:?} (with error rate {:?})", $x, $y, diff --git a/primitives/runtime/src/offchain/http.rs b/primitives/runtime/src/offchain/http.rs index 06f94707c9a93..dede4db5dd3de 100644 --- a/primitives/runtime/src/offchain/http.rs +++ b/primitives/runtime/src/offchain/http.rs @@ -404,7 +404,7 @@ impl Iterator for ResponseBody { fn next(&mut self) -> Option { if self.error.is_some() { - return None; + return None } if self.filled_up_to.is_none() { @@ -413,7 +413,7 @@ impl Iterator for ResponseBody { match result { Err(e) => { self.error = Some(e); - return None; + return None }, Ok(0) => return None, Ok(size) => { @@ -425,7 +425,7 @@ impl Iterator for ResponseBody { if Some(self.position) == self.filled_up_to { self.filled_up_to = None; - return self.next(); + return self.next() } let result = self.buffer[self.position]; @@ -452,7 +452,7 @@ impl Headers { let raw = name.as_bytes(); for &(ref key, ref val) in &self.raw { if &**key == raw { - return str::from_utf8(val).ok(); + return str::from_utf8(val).ok() } } None diff --git a/primitives/runtime/src/offchain/storage_lock.rs b/primitives/runtime/src/offchain/storage_lock.rs index 
c80367d10c3a5..47325743bd2f3 100644 --- a/primitives/runtime/src/offchain/storage_lock.rs +++ b/primitives/runtime/src/offchain/storage_lock.rs @@ -222,8 +222,8 @@ impl Lockable for BlockAndTime { type Deadline = BlockAndTimeDeadline; fn deadline(&self) -> Self::Deadline { - let block_number = ::current_block_number() - + self.expiration_block_number_offset.into(); + let block_number = ::current_block_number() + + self.expiration_block_number_offset.into(); BlockAndTimeDeadline { timestamp: offchain::timestamp().add(self.expiration_duration), block_number, @@ -231,8 +231,8 @@ impl Lockable for BlockAndTime { } fn has_expired(deadline: &Self::Deadline) -> bool { - offchain::timestamp() > deadline.timestamp - && ::current_block_number() > deadline.block_number + offchain::timestamp() > deadline.timestamp && + ::current_block_number() > deadline.block_number } fn snooze(deadline: &Self::Deadline) { diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index d766d21098988..1c48b1933431d 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -1422,7 +1422,7 @@ impl<'a, T: codec::Input> codec::Input for AppendZerosInput<'a, T> { into[i] = b; i += 1; } else { - break; + break } } i @@ -1539,7 +1539,7 @@ impl AccountIdConversion fo fn try_from_sub_account(x: &T) -> Option<(Self, S)> { x.using_encoded(|d| { if d[0..4] != Id::TYPE_ID { - return None; + return None } let mut cursor = &d[4..]; let result = Decode::decode(&mut cursor).ok()?; diff --git a/primitives/runtime/src/transaction_validity.rs b/primitives/runtime/src/transaction_validity.rs index 60a3415f65478..4646808b8c8e3 100644 --- a/primitives/runtime/src/transaction_validity.rs +++ b/primitives/runtime/src/transaction_validity.rs @@ -105,15 +105,12 @@ impl From for &'static str { InvalidTransaction::BadProof => "Transaction has a bad signature", InvalidTransaction::AncientBirthBlock => "Transaction has an ancient birth block", 
InvalidTransaction::ExhaustsResources => "Transaction would exhaust the block limits", - InvalidTransaction::Payment => { - "Inability to pay some fees (e.g. account balance too low)" - }, - InvalidTransaction::BadMandatory => { - "A call was labelled as mandatory, but resulted in an Error." - }, - InvalidTransaction::MandatoryDispatch => { - "Transaction dispatch is mandatory; transactions may not have mandatory dispatches." - }, + InvalidTransaction::Payment => + "Inability to pay some fees (e.g. account balance too low)", + InvalidTransaction::BadMandatory => + "A call was labelled as mandatory, but resulted in an Error.", + InvalidTransaction::MandatoryDispatch => + "Transaction dispatch is mandatory; transactions may not have mandatory dispatches.", InvalidTransaction::Custom(_) => "InvalidTransaction custom error", InvalidTransaction::BadSigner => "Invalid signing address", } @@ -135,12 +132,10 @@ pub enum UnknownTransaction { impl From for &'static str { fn from(unknown: UnknownTransaction) -> &'static str { match unknown { - UnknownTransaction::CannotLookup => { - "Could not lookup information required to validate the transaction" - }, - UnknownTransaction::NoUnsignedValidator => { - "Could not find an unsigned validator for the unsigned transaction" - }, + UnknownTransaction::CannotLookup => + "Could not lookup information required to validate the transaction", + UnknownTransaction::NoUnsignedValidator => + "Could not find an unsigned validator for the unsigned transaction", UnknownTransaction::Custom(_) => "UnknownTransaction custom error", } } diff --git a/primitives/sandbox/src/embedded_executor.rs b/primitives/sandbox/src/embedded_executor.rs index 7415d21f9c034..115c3192f3d89 100644 --- a/primitives/sandbox/src/embedded_executor.rs +++ b/primitives/sandbox/src/embedded_executor.rs @@ -176,7 +176,7 @@ impl ImportResolver for EnvironmentDefinitionBuilder { module_name, field_name, ); - return Err(wasmi::Error::Instantiation(String::new())); + return 
Err(wasmi::Error::Instantiation(String::new())) }, }; Ok(FuncInstance::alloc_host(signature.clone(), host_func_idx.0)) @@ -212,7 +212,7 @@ impl ImportResolver for EnvironmentDefinitionBuilder { module_name, field_name, ); - return Err(wasmi::Error::Instantiation(String::new())); + return Err(wasmi::Error::Instantiation(String::new())) }, }; Ok(memory.memref.clone()) @@ -315,7 +315,7 @@ mod tests { fn env_assert(_e: &mut State, args: &[Value]) -> Result { if args.len() != 1 { - return Err(HostError); + return Err(HostError) } let condition = args[0].as_i32().ok_or_else(|| HostError)?; if condition != 0 { @@ -326,7 +326,7 @@ mod tests { } fn env_inc_counter(e: &mut State, args: &[Value]) -> Result { if args.len() != 1 { - return Err(HostError); + return Err(HostError) } let inc_by = args[0].as_i32().ok_or_else(|| HostError)?; e.counter += inc_by as u32; @@ -335,7 +335,7 @@ mod tests { /// Function that takes one argument of any type and returns that value. fn env_polymorphic_id(_e: &mut State, args: &[Value]) -> Result { if args.len() != 1 { - return Err(HostError); + return Err(HostError) } Ok(ReturnValue::Value(args[0])) } diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 47127c8382cd5..fdc50e3f8f207 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -121,14 +121,13 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { - self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() - == other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() - && self - .overlay + self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() == + other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() && + self.overlay .children() .map(|(iter, i)| (i, iter.map(|(k, v)| (k, v.value())).collect::>())) - .collect::>() - == other + .collect::>() == + other .overlay .children() .map(|(iter, i)| { @@ 
-190,7 +189,7 @@ impl Externalities for BasicExternalities { fn place_storage(&mut self, key: StorageKey, maybe_value: Option) { if is_child_storage_key(&key) { warn!(target: "trie", "Refuse to set child storage key via main storage"); - return; + return } self.overlay.set_storage(key, maybe_value) @@ -227,7 +226,7 @@ impl Externalities for BasicExternalities { "Refuse to clear prefix that is part of child storage key via main storage" ); let maybe_cursor = Some(prefix.to_vec()); - return MultiRemovalResults { maybe_cursor, backend: 0, unique: 0, loops: 0 }; + return MultiRemovalResults { maybe_cursor, backend: 0, unique: 0, loops: 0 } } let count = self.overlay.clear_prefix(prefix); diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index f2eff9880583a..1db0ec517015b 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -321,11 +321,11 @@ where // If `backend_key` is less than the `overlay_key`, we found out next key. if cmp == Some(Ordering::Less) { - return next_backend_key; + return next_backend_key } else if overlay_key.1.value().is_some() { // If there exists a value for the `overlay_key` in the overlay // (aka the key is still valid), it means we have found our next key. - return Some(overlay_key.0.to_vec()); + return Some(overlay_key.0.to_vec()) } else if cmp == Some(Ordering::Equal) { // If the `backend_key` and `overlay_key` are equal, it means that we need // to search for the next backend key, because the overlay has overwritten @@ -362,11 +362,11 @@ where // If `backend_key` is less than the `overlay_key`, we found out next key. if cmp == Some(Ordering::Less) { - return next_backend_key; + return next_backend_key } else if overlay_key.1.value().is_some() { // If there exists a value for the `overlay_key` in the overlay // (aka the key is still valid), it means we have found our next key. 
- return Some(overlay_key.0.to_vec()); + return Some(overlay_key.0.to_vec()) } else if cmp == Some(Ordering::Equal) { // If the `backend_key` and `overlay_key` are equal, it means that we need // to search for the next backend key, because the overlay has overwritten @@ -391,7 +391,7 @@ where let _guard = guard(); if is_child_storage_key(&key) { warn!(target: "trie", "Refuse to directly set child storage key"); - return; + return } // NOTE: be careful about touching the key names – used outside substrate! @@ -472,7 +472,7 @@ where target: "trie", "Refuse to directly clear prefix that is part or contains of child storage key", ); - return MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 }; + return MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } } self.mark_dirty(); @@ -538,7 +538,7 @@ where storage_root = %HexDisplay::from(&root.as_ref()), cached = true, ); - return root.encode(); + return root.encode() } let root = @@ -761,7 +761,7 @@ where .apply_to_keys_while(maybe_child, maybe_prefix, maybe_cursor, |key| { if maybe_limit.map_or(false, |limit| loop_count == limit) { maybe_next_key = Some(key.to_vec()); - return false; + return false } let overlay = match maybe_child { Some(child_info) => self.overlay.child_storage(child_info, key), diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index c04076937a98e..1f106593ede34 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -242,9 +242,8 @@ mod execution { /// Gets the corresponding manager for the execution strategy. 
pub fn get_manager(self) -> ExecutionManager> { match self { - ExecutionStrategy::AlwaysWasm => { - ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted) - }, + ExecutionStrategy::AlwaysWasm => + ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted), ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible, ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| { @@ -441,10 +440,9 @@ mod execution { self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); let (wasm_result, _) = self.execute_aux(false); - if (result.is_ok() - && wasm_result.is_ok() - && result.as_ref().ok() == wasm_result.as_ref().ok()) - || result.is_err() && wasm_result.is_err() + if (result.is_ok() && + wasm_result.is_ok() && result.as_ref().ok() == wasm_result.as_ref().ok()) || + result.is_err() && wasm_result.is_err() { result } else { @@ -488,18 +486,15 @@ mod execution { { let result = { match manager { - ExecutionManager::Both(on_consensus_failure) => { - self.execute_call_with_both_strategy(on_consensus_failure) - }, - ExecutionManager::NativeElseWasm => { - self.execute_call_with_native_else_wasm_strategy() - }, + ExecutionManager::Both(on_consensus_failure) => + self.execute_call_with_both_strategy(on_consensus_failure), + ExecutionManager::NativeElseWasm => + self.execute_call_with_native_else_wasm_strategy(), ExecutionManager::AlwaysWasm(trust_level) => { let _abort_guard = match trust_level { BackendTrustLevel::Trusted => None, - BackendTrustLevel::Untrusted => { - Some(sp_panic_handler::AbortGuard::never_abort()) - }, + BackendTrustLevel::Untrusted => + Some(sp_panic_handler::AbortGuard::never_abort()), }; self.execute_aux(false).0 }, @@ -710,7 +705,7 @@ mod execution { last: &mut SmallVec<[Vec; 2]>, ) -> bool { if stopped_at == 0 || stopped_at > MAX_NESTED_TRIE_DEPTH { - return false; + return false } match stopped_at { 1 => { @@ -720,7 +715,7 @@ mod 
execution { match last.len() { 0 => { last.push(top_last); - return true; + return true }, 2 => { last.pop(); @@ -729,12 +724,12 @@ mod execution { } // update top trie access. last[0] = top_last; - return true; + return true } else { // No change in top trie accesses. // Indicates end of reading of a child trie. last.truncate(1); - return true; + return true } }, 2 => { @@ -748,7 +743,7 @@ mod execution { if let Some(top_last) = top_last { last.push(top_last) } else { - return false; + return false } } else if let Some(top_last) = top_last { last[0] = top_last; @@ -757,10 +752,10 @@ mod execution { last.pop(); } last.push(child_last); - return true; + return true } else { // stopped at level 2 so child last is define. - return false; + return false } }, _ => (), @@ -804,7 +799,7 @@ mod execution { H::Out: Ord + Codec, { if start_at.len() > MAX_NESTED_TRIE_DEPTH { - return Err(Box::new("Invalid start of range.")); + return Err(Box::new("Invalid start of range.")) } let recorder = sp_trie::recorder::Recorder::default(); @@ -821,7 +816,7 @@ mod execution { { child_roots.insert(state_root); } else { - return Err(Box::new("Invalid range start child trie key.")); + return Err(Box::new("Invalid range start child trie key.")) } (Some(storage_key), start_at.get(1).cloned()) @@ -834,9 +829,8 @@ mod execution { let storage_key = PrefixedStorageKey::new_ref(storage_key); ( Some(match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - ChildInfo::new_default(storage_key) - }, + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), None => return Err(Box::new("Invalid range start child trie key.")), }), 2, @@ -854,8 +848,8 @@ mod execution { None, start_at_ref, |key, value| { - if first - && start_at_ref + if first && + start_at_ref .as_ref() .map(|start| &key.as_slice() > start) .unwrap_or(true) @@ -865,8 +859,8 @@ mod execution { if first { true - } else if depth < MAX_NESTED_TRIE_DEPTH - && 
sp_core::storage::well_known_keys::is_child_storage_key( + } else if depth < MAX_NESTED_TRIE_DEPTH && + sp_core::storage::well_known_keys::is_child_storage_key( key.as_slice(), ) { count += 1; @@ -891,11 +885,11 @@ mod execution { if switch_child_key.is_none() { if depth == 1 { - break; + break } else if completed { start_at = child_key.take(); } else { - break; + break } } else { child_key = switch_child_key; @@ -1204,7 +1198,7 @@ mod execution { parent_storage_keys: Default::default(), }]; if start_at.len() > MAX_NESTED_TRIE_DEPTH { - return Err(Box::new("Invalid start of range.")); + return Err(Box::new("Invalid start of range.")) } let mut child_roots = HashSet::new(); @@ -1217,7 +1211,7 @@ mod execution { child_roots.insert(state_root.clone()); Some((storage_key, state_root)) } else { - return Err(Box::new("Invalid range start child trie key.")); + return Err(Box::new("Invalid range start child trie key.")) }; (child_key, start_at.get(1).cloned()) @@ -1236,9 +1230,8 @@ mod execution { let storage_key = PrefixedStorageKey::new_ref(storage_key); ( Some(match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - ChildInfo::new_default(storage_key) - }, + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), None => return Err(Box::new("Invalid range start child trie key.")), }), 2, @@ -1261,8 +1254,8 @@ mod execution { None, start_at_ref, |key, value| { - if first - && start_at_ref + if first && + start_at_ref .as_ref() .map(|start| &key.as_slice() > start) .unwrap_or(true) @@ -1275,8 +1268,8 @@ mod execution { } if first { true - } else if depth < MAX_NESTED_TRIE_DEPTH - && sp_core::storage::well_known_keys::is_child_storage_key( + } else if depth < MAX_NESTED_TRIE_DEPTH && + sp_core::storage::well_known_keys::is_child_storage_key( key.as_slice(), ) { if child_roots.contains(value.as_slice()) { @@ -1297,10 +1290,10 @@ mod execution { if switch_child_key.is_none() { if !completed { - break 
depth; + break depth } if depth == 1 { - break 0; + break 0 } else { start_at = child_key.take().map(|entry| entry.0); } @@ -1910,7 +1903,7 @@ mod tests { key.clone(), Some(value.clone()), )); - break; + break } } } @@ -2071,7 +2064,7 @@ mod tests { .unwrap(); if completed_depth == 0 { - break; + break } assert!(result.update_last_key(completed_depth, &mut start_at)); } diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs b/primitives/state-machine/src/overlayed_changes/changeset.rs index 835e8ab96c24b..e5dad7157c731 100644 --- a/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -289,7 +289,7 @@ impl OverlayedMap { /// Calling this while already inside the runtime will return an error. pub fn enter_runtime(&mut self) -> Result<(), AlreadyInRuntime> { if let ExecutionMode::Runtime = self.execution_mode { - return Err(AlreadyInRuntime); + return Err(AlreadyInRuntime) } self.execution_mode = ExecutionMode::Runtime; self.num_client_transactions = self.transaction_depth(); @@ -302,7 +302,7 @@ impl OverlayedMap { /// Calling this while already outside the runtime will return an error. 
pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { if let ExecutionMode::Client = self.execution_mode { - return Err(NotInRuntime); + return Err(NotInRuntime) } self.execution_mode = ExecutionMode::Client; if self.has_open_runtime_transactions() { @@ -349,7 +349,7 @@ impl OverlayedMap { // runtime is not allowed to close transactions started by the client if let ExecutionMode::Runtime = self.execution_mode { if !self.has_open_runtime_transactions() { - return Err(NoOpenTransaction); + return Err(NoOpenTransaction) } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index f91c50e32be13..da4250b6ba3e1 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -619,8 +619,8 @@ pub mod tests { .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_version); assert!(!tx.drain().is_empty()); assert!( - new_root - != test_trie(state_version, None, None) + new_root != + test_trie(state_version, None, None) .storage_root(iter::empty(), state_version) .0 ); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index f9d8f641913cc..cdd1bb0bba055 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -273,7 +273,7 @@ where #[cfg(feature = "std")] { if let Some(result) = self.cache.read().child_root.get(child_info.storage_key()) { - return Ok(*result); + return Ok(*result) } } @@ -469,7 +469,7 @@ where Ok(None) => return, Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); - return; + return }, } } else { @@ -492,7 +492,7 @@ where Ok(None) => return, Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); - return; + return }, }; @@ -538,9 +538,8 @@ where .build(); let prefix = maybe_prefix.unwrap_or(&[]); let iter = match maybe_start_at { - 
Some(start_at) => { - TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at) - }, + Some(start_at) => + TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at), None => TrieDBKeyIterator::new_prefixed(&trie, prefix), }?; @@ -553,7 +552,7 @@ where .unwrap_or(true)); if !f(&key) { - break; + break } } @@ -600,7 +599,7 @@ where debug_assert!(key.starts_with(prefix)); if !f(key, value) { - return Ok(false); + return Ok(false) } } @@ -616,9 +615,8 @@ where }; match result { Ok(completed) => Ok(completed), - Err(e) if matches!(*e, TrieError::IncompleteDatabase(_)) && allow_missing_nodes => { - Ok(false) - }, + Err(e) if matches!(*e, TrieError::IncompleteDatabase(_)) && allow_missing_nodes => + Ok(false), Err(e) => Err(format!("TrieDB iteration error: {}", e)), } } @@ -729,7 +727,7 @@ where self.with_recorder_and_cache_for_storage_root(Some(child_root), |recorder, cache| { let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); match match state_version { - StateVersion::V0 => { + StateVersion::V0 => child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), &mut eph, @@ -737,9 +735,8 @@ where delta, recorder, cache, - ) - }, - StateVersion::V1 => { + ), + StateVersion::V1 => child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), &mut eph, @@ -747,8 +744,7 @@ where delta, recorder, cache, - ) - }, + ), } { Ok(ret) => (Some(ret), ret), Err(e) => { @@ -880,7 +876,7 @@ impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { - return Some([0u8].to_vec()); + return Some([0u8].to_vec()) } match self.storage.get(key, prefix) { Ok(x) => x, diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 1644cacbd0f8a..79c1012196bde 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -290,9 +290,8 @@ impl ChildInfo { /// this trie. 
pub fn prefixed_storage_key(&self) -> PrefixedStorageKey { match self { - ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => { - ChildType::ParentKeyId.new_prefixed_key(data.as_slice()) - }, + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => + ChildType::ParentKeyId.new_prefixed_key(data.as_slice()), } } diff --git a/primitives/timestamp/src/lib.rs b/primitives/timestamp/src/lib.rs index cfb480b574b43..d88b1839babe6 100644 --- a/primitives/timestamp/src/lib.rs +++ b/primitives/timestamp/src/lib.rs @@ -241,7 +241,7 @@ impl sp_inherents::InherentDataProvider for InherentDataProvider { error: &[u8], ) -> Option> { if *identifier != INHERENT_IDENTIFIER { - return None; + return None } match InherentError::try_from(&INHERENT_IDENTIFIER, error)? { @@ -253,7 +253,7 @@ impl sp_inherents::InherentDataProvider for InherentDataProvider { if valid > timestamp + max_drift { return Some(Err(sp_inherents::Error::Application(Box::from( InherentError::TooFarInFuture, - )))); + )))) } let diff = valid.checked_sub(timestamp).unwrap_or_default(); diff --git a/primitives/transaction-storage-proof/src/lib.rs b/primitives/transaction-storage-proof/src/lib.rs index e4a9ce74763f3..fde84c1c58b1a 100644 --- a/primitives/transaction-storage-proof/src/lib.rs +++ b/primitives/transaction-storage-proof/src/lib.rs @@ -101,7 +101,7 @@ impl sp_inherents::InherentDataProvider for InherentDataProvider { mut error: &[u8], ) -> Option> { if *identifier != INHERENT_IDENTIFIER { - return None; + return None } let error = InherentError::decode(&mut error).ok()?; @@ -160,13 +160,12 @@ pub mod registration { .saturating_sub(DEFAULT_STORAGE_PERIOD.into()); if number.is_zero() { // Too early to collect proofs. - return Ok(InherentDataProvider::new(None)); + return Ok(InherentDataProvider::new(None)) } let proof = match client.block_indexed_body(number)? { - Some(transactions) if !transactions.is_empty() => { - Some(build_proof(parent.as_ref(), transactions)?) 
- }, + Some(transactions) if !transactions.is_empty() => + Some(build_proof(parent.as_ref(), transactions)?), Some(_) | None => { // Nothing was indexed in that block. None diff --git a/primitives/trie/src/cache/mod.rs b/primitives/trie/src/cache/mod.rs index 3b6420b7f9c62..85539cf626857 100644 --- a/primitives/trie/src/cache/mod.rs +++ b/primitives/trie/src/cache/mod.rs @@ -290,7 +290,7 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { if let Some(res) = self.shared_inner.node_cache().get(&hash) { tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache"); self.shared_node_cache_access.insert(hash); - return Ok(res); + return Ok(res) } match self.local_cache.entry(hash) { @@ -317,7 +317,7 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { if let Some(node) = self.shared_inner.node_cache().get(hash) { tracing::trace!(target: LOG_TARGET, ?hash, "Getting node from shared cache"); self.shared_node_cache_access.insert(*hash); - return Some(node); + return Some(node) } let res = self.local_cache.get(hash); diff --git a/primitives/trie/src/cache/shared_cache.rs b/primitives/trie/src/cache/shared_cache.rs index e4284aa5bc74f..9d4d36b83a28a 100644 --- a/primitives/trie/src/cache/shared_cache.rs +++ b/primitives/trie/src/cache/shared_cache.rs @@ -287,11 +287,10 @@ impl PartialEq for ValueCacheKey<'_, H> { (Self::Hash { hash, .. }, _) => *hash == other.get_hash(), (_, Self::Hash { hash: other_hash, .. }) => self.get_hash() == *other_hash, // If both are not the `Hash` variant, we compare all the values. 
- _ => { - self.get_hash() == other.get_hash() - && self.storage_root() == other.storage_root() - && self.storage_key() == other.storage_key() - }, + _ => + self.get_hash() == other.get_hash() && + self.storage_root() == other.storage_root() && + self.storage_key() == other.storage_key(), } } } @@ -406,14 +405,12 @@ impl> SharedValueCache { "`SharedValueCached::update` was called with a key to add \ that uses the `Hash` variant. This would lead to potential hash collision!", ); - return; - }, - ValueCacheKey::Ref { storage_key, storage_root, hash } => { - (storage_root, storage_key.into(), hash) - }, - ValueCacheKey::Value { storage_root, storage_key, hash } => { - (storage_root, storage_key, hash) + return }, + ValueCacheKey::Ref { storage_key, storage_root, hash } => + (storage_root, storage_key.into(), hash), + ValueCacheKey::Value { storage_root, storage_key, hash } => + (storage_root, storage_key, hash), }; let (size_update, storage_key) = diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 76cc28668d61a..f632320dd296d 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -43,7 +43,7 @@ impl<'a> ByteSliceInput<'a> { fn take(&mut self, count: usize) -> Result, codec::Error> { if self.offset + count > self.data.len() { - return Err("out of data".into()); + return Err("out of data".into()) } let range = self.offset..(self.offset + count); @@ -65,7 +65,7 @@ impl<'a> Input for ByteSliceInput<'a> { fn read_byte(&mut self) -> Result { if self.offset + 1 > self.data.len() { - return Err("out of data".into()); + return Err("out of data".into()) } let byte = self.data[self.offset]; @@ -111,11 +111,11 @@ where let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { - return Err(Error::BadFormat); + return Err(Error::BadFormat) } let partial = input.take( - (nibble_count + 
(nibble_ops::NIBBLE_PER_BYTE - 1)) - / nibble_ops::NIBBLE_PER_BYTE, + (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / + nibble_ops::NIBBLE_PER_BYTE, )?; let partial_padding = nibble_ops::number_padding(nibble_count); let bitmap_range = input.take(BITMAP_LENGTH)?; @@ -155,11 +155,11 @@ where let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { - return Err(Error::BadFormat); + return Err(Error::BadFormat) } let partial = input.take( - (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) - / nibble_ops::NIBBLE_PER_BYTE, + (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / + nibble_ops::NIBBLE_PER_BYTE, )?; let partial_padding = nibble_ops::number_padding(nibble_count); let value = if contains_hash { @@ -228,15 +228,12 @@ where ) -> Vec { let contains_hash = matches!(&value, Some(Value::Node(..))); let mut output = match (&value, contains_hash) { - (&None, _) => { - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) - }, - (_, false) => { - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue) - }, - (_, true) => { - partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueBranch) - }, + (&None, _) => + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue), + (_, false) => + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue), + (_, true) => + partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueBranch), }; let bitmap_index = output.len(); @@ -287,12 +284,10 @@ fn partial_from_iterator_encode>( NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), - NodeKind::HashedValueLeaf => { - 
NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output) - }, - NodeKind::HashedValueBranch => { - NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output) - }, + NodeKind::HashedValueLeaf => + NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output), + NodeKind::HashedValueBranch => + NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output), }; output.extend(partial); output diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index 3d74eec4d1cc5..f3544be65b2e9 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -54,18 +54,16 @@ impl Encode for NodeHeader { fn encode_to(&self, output: &mut T) { match self { NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE), - NodeHeader::Branch(true, nibble_count) => { - encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, 2, output) - }, + NodeHeader::Branch(true, nibble_count) => + encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, 2, output), NodeHeader::Branch(false, nibble_count) => encode_size_and_prefix( *nibble_count, trie_constants::BRANCH_WITHOUT_MASK, 2, output, ), - NodeHeader::Leaf(nibble_count) => { - encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, 2, output) - }, + NodeHeader::Leaf(nibble_count) => + encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, 2, output), NodeHeader::HashedValueBranch(nibble_count) => encode_size_and_prefix( *nibble_count, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, @@ -88,16 +86,14 @@ impl Decode for NodeHeader { fn decode(input: &mut I) -> Result { let i = input.read_byte()?; if i == trie_constants::EMPTY_TRIE { - return Ok(NodeHeader::Null); + return Ok(NodeHeader::Null) } match i & (0b11 << 6) { trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input, 2)?)), - trie_constants::BRANCH_WITH_MASK => { - Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)) - }, - 
trie_constants::BRANCH_WITHOUT_MASK => { - Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)) - }, + trie_constants::BRANCH_WITH_MASK => + Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)), + trie_constants::BRANCH_WITHOUT_MASK => + Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)), trie_constants::EMPTY_TRIE => { if i & (0b111 << 5) == trie_constants::ALT_HASHING_LEAF_PREFIX_MASK { Ok(NodeHeader::HashedValueLeaf(decode_size(i, input, 3)?)) @@ -164,13 +160,13 @@ fn decode_size( let max_value = 255u8 >> prefix_mask; let mut result = (first & max_value) as usize; if result < max_value as usize { - return Ok(result); + return Ok(result) } result -= 1; loop { let n = input.read_byte()? as usize; if n < 255 { - return Ok(result + n + 1); + return Ok(result + n + 1) } result += 255; } diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index a13531e5956c3..d5ae9a43fb1eb 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -71,7 +71,7 @@ where // Only check root if expected root is passed as argument. if let Some(expected_root) = expected_root { if expected_root != &top_root { - return Err(Error::RootMismatch(top_root, *expected_root)); + return Err(Error::RootMismatch(top_root, *expected_root)) } } @@ -92,7 +92,7 @@ where let mut root = TrieHash::::default(); // still in a proof so prevent panic if root.as_mut().len() != value.as_slice().len() { - return Err(Error::InvalidChildRoot(key, value)); + return Err(Error::InvalidChildRoot(key, value)) } root.as_mut().copy_from_slice(value.as_ref()); child_tries.push(root); @@ -110,7 +110,7 @@ where } if !HashDBT::::contains(db, &top_root, EMPTY_PREFIX) { - return Err(Error::IncompleteProof); + return Err(Error::IncompleteProof) } let mut previous_extracted_child_trie = None; @@ -132,11 +132,11 @@ where if let Some(child_root) = previous_extracted_child_trie { // A child root was read from proof but is not present // in top trie. 
- return Err(Error::ExtraneousChildProof(child_root)); + return Err(Error::ExtraneousChildProof(child_root)) } if nodes_iter.next().is_some() { - return Err(Error::ExtraneousChildNode); + return Err(Error::ExtraneousChildNode) } Ok(top_root) @@ -171,7 +171,7 @@ where let mut root = TrieHash::::default(); if root.as_mut().len() != value.as_slice().len() { // some child trie root in top trie are not an encoded hash. - return Err(Error::InvalidChildRoot(key.to_vec(), value.to_vec())); + return Err(Error::InvalidChildRoot(key.to_vec(), value.to_vec())) } root.as_mut().copy_from_slice(value.as_ref()); child_tries.push(root); @@ -194,7 +194,7 @@ where if !HashDBT::::contains(partial_db, &child_root, EMPTY_PREFIX) { // child proof are allowed to be missing (unused root can be included // due to trie structure modification). - continue; + continue } let trie = crate::TrieDBBuilder::::new(partial_db, &child_root).build(); diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 6b75b9b05ec54..435e6a986722e 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -57,18 +57,14 @@ fn fuse_nibbles_node(nibbles: &[u8], kind: NodeKind) -> impl Iterator let size = nibbles.len(); let iter_start = match kind { NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2), - NodeKind::BranchNoValue => { - size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2) - }, - NodeKind::BranchWithValue => { - size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2) - }, - NodeKind::HashedValueLeaf => { - size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3) - }, - NodeKind::HashedValueBranch => { - size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4) - }, + NodeKind::BranchNoValue => + size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2), + NodeKind::BranchWithValue => + size_and_prefix_iterator(size, 
trie_constants::BRANCH_WITH_MASK, 2), + NodeKind::HashedValueLeaf => + size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3), + NodeKind::HashedValueBranch => + size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4), }; iter_start .chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None }) diff --git a/primitives/version/proc-macro/src/decl_runtime_version.rs b/primitives/version/proc-macro/src/decl_runtime_version.rs index e7d54545832ba..9a25adfa5fca2 100644 --- a/primitives/version/proc-macro/src/decl_runtime_version.rs +++ b/primitives/version/proc-macro/src/decl_runtime_version.rs @@ -81,12 +81,8 @@ impl ParseRuntimeVersion { fn parse_expr(init_expr: &Expr) -> Result { let init_expr = match init_expr { Expr::Struct(ref e) => e, - _ => { - return Err(Error::new( - init_expr.span(), - "expected a struct initializer expression", - )) - }, + _ => + return Err(Error::new(init_expr.span(), "expected a struct initializer expression")), }; let mut parsed = ParseRuntimeVersion::default(); @@ -99,9 +95,8 @@ impl ParseRuntimeVersion { fn parse_field_value(&mut self, field_value: &FieldValue) -> Result<()> { let field_name = match field_value.member { syn::Member::Named(ref ident) => ident, - syn::Member::Unnamed(_) => { - return Err(Error::new(field_value.span(), "only named members must be used")) - }, + syn::Member::Unnamed(_) => + return Err(Error::new(field_value.span(), "only named members must be used")), }; fn parse_once( @@ -138,7 +133,7 @@ impl ParseRuntimeVersion { // the "runtime_version" custom section. `impl_runtime_apis` is responsible for // generating a custom section with the supported runtime apis descriptor. 
} else { - return Err(Error::new(field_name.span(), "unknown field")); + return Err(Error::new(field_name.span(), "unknown field")) } Ok(()) @@ -147,12 +142,11 @@ impl ParseRuntimeVersion { fn parse_num_literal(expr: &Expr) -> Result { let lit = match *expr { Expr::Lit(ExprLit { lit: Lit::Int(ref lit), .. }) => lit, - _ => { + _ => return Err(Error::new( expr.span(), "only numeric literals (e.g. `10`) are supported here", - )) - }, + )), }; lit.base10_parse::() } @@ -160,12 +154,11 @@ impl ParseRuntimeVersion { fn parse_num_literal_u8(expr: &Expr) -> Result { let lit = match *expr { Expr::Lit(ExprLit { lit: Lit::Int(ref lit), .. }) => lit, - _ => { + _ => return Err(Error::new( expr.span(), "only numeric literals (e.g. `10`) are supported here", - )) - }, + )), }; lit.base10_parse::() } diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 0c9c410d1c198..0bd62f0bac5aa 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -288,9 +288,9 @@ pub fn core_version_from_apis(apis: &ApisVec) -> Option { impl RuntimeVersion { /// Check if this version matches other version for calling into runtime. 
pub fn can_call_with(&self, other: &RuntimeVersion) -> bool { - self.spec_version == other.spec_version - && self.spec_name == other.spec_name - && self.authoring_version == other.authoring_version + self.spec_version == other.spec_version && + self.spec_name == other.spec_name && + self.authoring_version == other.authoring_version } /// Check if the given api with `api_id` is implemented and the version passes the given @@ -344,8 +344,8 @@ impl NativeVersion { "`spec_name` does not match `{}` vs `{}`", self.runtime_version.spec_name, other.spec_name, )) - } else if self.runtime_version.authoring_version != other.authoring_version - && !self.can_author_with.contains(&other.authoring_version) + } else if self.runtime_version.authoring_version != other.authoring_version && + !self.can_author_with.contains(&other.authoring_version) { Err(format!( "`authoring_version` does not match `{version}` vs `{other_version}` and \ diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index c2913668042ab..d3e71f0ad28d6 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -365,7 +365,7 @@ pub(crate) fn parse_rpc_result( if let Some(error) = error { return Err(serde_json::from_value(error.clone()) - .expect("the JSONRPC result's error is always valid; qed")); + .expect("the JSONRPC result's error is always valid; qed")) } Ok(RpcTransactionOutput { result, receiver }) @@ -399,7 +399,7 @@ where if notification.is_new_best { blocks.insert(notification.hash); if blocks.len() == count { - break; + break } } } diff --git a/test-utils/derive/src/lib.rs b/test-utils/derive/src/lib.rs index 6ad9e7bb163f3..06b7d2463cbd8 100644 --- a/test-utils/derive/src/lib.rs +++ b/test-utils/derive/src/lib.rs @@ -37,7 +37,7 @@ fn parse_knobs( let vis = input.vis; if !sig.inputs.is_empty() { - return Err(syn::Error::new_spanned(&sig, "No arguments expected for tests.")); + return Err(syn::Error::new_spanned(&sig, "No arguments expected for tests.")) } let 
crate_name = match crate_name("substrate-test-utils") { diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 58d28d6c938af..8bda4ea602428 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -197,7 +197,7 @@ impl BlindCheckable for Extrinsic { fn check(self) -> Result { match self { Extrinsic::AuthoritiesChange(new_auth) => Ok(Extrinsic::AuthoritiesChange(new_auth)), - Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first } => { + Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first } => if sp_runtime::verify_encoded_lazy(&signature, &transfer, &transfer.from) { Ok(Extrinsic::Transfer { transfer, @@ -206,8 +206,7 @@ impl BlindCheckable for Extrinsic { }) } else { Err(InvalidTransaction::BadProof.into()) - } - }, + }, Extrinsic::IncludeData(v) => Ok(Extrinsic::IncludeData(v)), Extrinsic::StorageChange(key, value) => Ok(Extrinsic::StorageChange(key, value)), Extrinsic::OffchainIndexSet(key, value) => Ok(Extrinsic::OffchainIndexSet(key, value)), @@ -536,13 +535,13 @@ impl frame_support::traits::PalletInfo for Runtime { fn index() -> Option { let type_id = sp_std::any::TypeId::of::