From 247e4a73ffd930dd8de415a1a8645da5531a373d Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
Date: Mon, 14 Aug 2023 16:29:29 +0200
Subject: [PATCH] Use same `fmt` and `clippy` configs as in Substrate (#7611)

* Use same rustfmt.toml as Substrate

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* format format file

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Format with new config

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Add Substrate Clippy config

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Print Clippy version in CI

Otherwise it's difficult to reproduce locally.

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Make fmt happy

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Update node/core/pvf/src/error.rs

Co-authored-by: Tsvetomir Dimitrov <tsvetomir@parity.io>

* Update node/core/pvf/src/error.rs

Co-authored-by: Tsvetomir Dimitrov <tsvetomir@parity.io>

---------

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
Co-authored-by: Tsvetomir Dimitrov <tsvetomir@parity.io>
---
 .cargo/config.toml                            |   1 +
 cli/src/cli.rs                                |   4 +-
 cli/src/command.rs                            |   4 +-
 core-primitives/src/lib.rs                    |  12 +-
 node/collation-generation/src/lib.rs          |  22 ++--
 node/collation-generation/src/tests.rs        |  10 +-
 .../approval-voting/src/approval_checking.rs  |   8 +-
 node/core/approval-voting/src/criteria.rs     |  15 +--
 node/core/approval-voting/src/import.rs       |  28 ++---
 node/core/approval-voting/src/lib.rs          |  35 +++---
 node/core/approval-voting/src/ops.rs          |   7 +-
 node/core/av-store/src/lib.rs                 |  35 +++---
 node/core/backing/src/lib.rs                  |  14 ++-
 node/core/backing/src/metrics.rs              |   3 +-
 node/core/backing/src/tests.rs                |   3 +-
 node/core/bitfield-signing/src/lib.rs         |   4 +-
 node/core/candidate-validation/src/lib.rs     |  33 +++---
 node/core/chain-selection/src/lib.rs          |   6 +-
 node/core/dispute-coordinator/src/db/v1.rs    |   7 +-
 node/core/dispute-coordinator/src/import.rs   |   8 +-
 .../dispute-coordinator/src/initialized.rs    |  12 +-
 node/core/dispute-coordinator/src/lib.rs      |  19 +--
 .../src/participation/queues/mod.rs           |  22 ++--
 .../src/participation/queues/tests.rs         |   4 +-
 .../src/participation/tests.rs                |   3 +-
 .../src/scraping/candidates.rs                |   6 +-
 .../dispute-coordinator/src/scraping/mod.rs   |  15 +--
 .../dispute-coordinator/src/scraping/tests.rs |  19 +--
 node/core/dispute-coordinator/src/tests.rs    |  62 ++++++----
 node/core/parachains-inherent/src/lib.rs      |  11 +-
 node/core/provisioner/src/disputes/mod.rs     |   6 +-
 .../src/disputes/prioritized_selection/mod.rs |  45 ++++----
 .../disputes/prioritized_selection/tests.rs   |  19 +--
 node/core/provisioner/src/error.rs            |   3 +-
 node/core/provisioner/src/lib.rs              |  33 +++---
 node/core/provisioner/src/metrics.rs          |   7 +-
 node/core/provisioner/src/tests.rs            |   3 +-
 node/core/pvf-checker/src/lib.rs              |   4 +-
 node/core/pvf-checker/src/tests.rs            |   4 +-
 node/core/pvf/common/src/error.rs             |  23 ++--
 node/core/pvf/common/src/executor_intf.rs     |  20 ++--
 node/core/pvf/common/src/worker/mod.rs        |   6 +-
 node/core/pvf/execute-worker/src/lib.rs       |   3 +-
 node/core/pvf/prepare-worker/src/lib.rs       |  17 +--
 .../pvf/prepare-worker/src/memory_stats.rs    |   4 +-
 node/core/pvf/src/artifacts.rs                |   3 +-
 node/core/pvf/src/error.rs                    |  29 ++---
 node/core/pvf/src/execute/queue.rs            |   3 +-
 node/core/pvf/src/execute/worker_intf.rs      |   8 +-
 node/core/pvf/src/host.rs                     |  19 +--
 node/core/pvf/src/lib.rs                      |  30 ++---
 node/core/pvf/src/metrics.rs                  |   3 +-
 node/core/pvf/src/prepare/pool.rs             |  16 +--
 node/core/pvf/src/prepare/queue.rs            |   5 +-
 node/core/pvf/src/prepare/worker_intf.rs      |   4 +-
 node/core/pvf/src/worker_intf.rs              |  26 +++--
 node/core/runtime-api/src/lib.rs              |  11 +-
 node/core/runtime-api/src/tests.rs            |   3 +-
 node/gum/src/lib.rs                           |  15 ++-
 node/jaeger/src/lib.rs                        |   3 +-
 node/jaeger/src/spans.rs                      |   4 +-
 node/malus/src/variants/common.rs             |  22 ++--
 .../src/variants/dispute_valid_candidates.rs  |  11 +-
 .../src/variants/suggest_garbage_candidate.rs |  11 +-
 node/metrics/src/lib.rs                       |   3 +-
 node/network/approval-distribution/src/lib.rs |  36 +++---
 .../src/requester/fetch_task/mod.rs           |   3 +-
 .../src/requester/mod.rs                      |  17 ++-
 .../src/futures_undead.rs                     |   1 -
 node/network/availability-recovery/src/lib.rs |  49 ++++----
 .../availability-recovery/src/tests.rs        |   3 +-
 node/network/bridge/src/rx/mod.rs             |   6 +-
 node/network/bridge/src/rx/tests.rs           |   5 +-
 node/network/bridge/src/tx/mod.rs             |   3 +-
 .../network/bridge/src/validator_discovery.rs |   7 +-
 .../src/collator_side/mod.rs                  |  15 +--
 .../src/collator_side/tests.rs                |  10 +-
 .../src/collator_side/validators_buffer.rs    |   6 +-
 .../src/validator_side/tests.rs               |   3 +-
 node/network/dispute-distribution/src/lib.rs  |   8 +-
 .../src/receiver/batches/batch.rs             |   4 +-
 .../src/receiver/batches/waiting_queue.rs     |   4 +-
 .../dispute-distribution/src/receiver/mod.rs  |  10 +-
 .../src/sender/send_task.rs                   |  11 +-
 node/network/gossip-support/src/lib.rs        |   3 +-
 node/network/protocol/src/grid_topology.rs    |  19 +--
 node/network/protocol/src/lib.rs              |   6 +-
 node/network/protocol/src/peer_set.rs         |   3 +-
 .../src/request_response/incoming/mod.rs      |   4 +-
 .../protocol/src/request_response/mod.rs      |   6 +-
 .../network/statement-distribution/src/lib.rs |  49 ++++----
 .../statement-distribution/src/tests.rs       |   8 +-
 node/overseer/src/lib.rs                      |  34 +++---
 node/primitives/src/disputes/message.rs       |   4 +-
 node/primitives/src/disputes/status.rs        |  11 +-
 node/primitives/src/lib.rs                    |  26 +++--
 node/service/src/chain_spec.rs                |  11 +-
 node/service/src/fake_runtime_api.rs          |   3 +-
 node/service/src/lib.rs                       |  14 ++-
 node/service/src/relay_chain_selection.rs     |  11 +-
 node/service/src/tests.rs                     |  26 ++---
 node/subsystem-test-helpers/src/lib.rs        |   3 +-
 node/subsystem-types/src/lib.rs               |   4 +-
 node/subsystem-types/src/messages.rs          | 109 ++++++++++--------
 node/subsystem-types/src/runtime_client.rs    |   7 +-
 node/subsystem-util/src/lib.rs                |   9 +-
 node/subsystem-util/src/nesting_sender.rs     |  21 ++--
 node/subsystem-util/src/reputation.rs         |   3 +-
 node/test/client/src/block_builder.rs         |  19 +--
 node/test/service/src/lib.rs                  |  12 +-
 parachain/src/primitives.rs                   |  17 +--
 .../test-parachains/adder/collator/src/lib.rs |   8 +-
 .../adder/collator/tests/integration.rs       |   3 +-
 .../undying/collator/src/lib.rs               |   8 +-
 .../undying/collator/tests/integration.rs     |   3 +-
 primitives/src/runtime_api.rs                 |  21 ++--
 primitives/src/v5/metrics.rs                  |  10 +-
 primitives/src/v5/mod.rs                      |  79 +++++++------
 primitives/test-helpers/src/lib.rs            |   3 +-
 runtime/common/slot_range_helper/src/lib.rs   |   8 +-
 runtime/common/src/assigned_slots.rs          |   9 +-
 runtime/common/src/auctions.rs                |  42 +++----
 runtime/common/src/claims.rs                  |  21 ++--
 runtime/common/src/crowdloan/migration.rs     |   4 +-
 runtime/common/src/crowdloan/mod.rs           |  67 ++++++-----
 runtime/common/src/integration_tests.rs       |   9 +-
 runtime/common/src/paras_registrar.rs         |  44 ++++---
 runtime/common/src/paras_sudo_wrapper.rs      |   8 +-
 runtime/common/src/purchase.rs                |  23 ++--
 runtime/common/src/slots/mod.rs               |  33 +++---
 runtime/common/src/traits.rs                  |  25 ++--
 runtime/kusama/src/xcm_config.rs              |  28 ++---
 runtime/parachains/src/builder.rs             |  16 +--
 runtime/parachains/src/configuration.rs       |  48 ++++----
 .../src/configuration/migration/v7.rs         |  21 ++--
 runtime/parachains/src/disputes.rs            |   7 +-
 runtime/parachains/src/disputes/migration.rs  |   6 +-
 runtime/parachains/src/disputes/tests.rs      |   6 +-
 runtime/parachains/src/hrmp.rs                |  19 +--
 runtime/parachains/src/inclusion/mod.rs       |  27 ++---
 runtime/parachains/src/initializer.rs         |  12 +-
 runtime/parachains/src/origin.rs              |   1 -
 runtime/parachains/src/paras/mod.rs           |  91 ++++++++-------
 runtime/parachains/src/paras/tests.rs         |   8 +-
 runtime/parachains/src/paras_inherent/mod.rs  |  42 ++++---
 .../parachains/src/paras_inherent/tests.rs    |  51 +++++---
 runtime/parachains/src/runtime_api_impl/v5.rs |   3 +-
 runtime/parachains/src/scheduler.rs           |  94 ++++++++-------
 runtime/parachains/src/scheduler/tests.rs     |  14 ++-
 runtime/parachains/src/shared.rs              |   4 +-
 runtime/parachains/src/util.rs                |   4 +-
 runtime/polkadot/src/governance/old.rs        |   3 +-
 runtime/polkadot/src/xcm_config.rs            |  23 ++--
 runtime/rococo/src/xcm_config.rs              |  12 +-
 runtime/test-runtime/src/lib.rs               |   4 +-
 runtime/test-runtime/src/xcm_config.rs        |   4 +-
 runtime/westend/src/lib.rs                    |   4 +-
 runtime/westend/src/xcm_config.rs             |   4 +-
 rustfmt.toml                                  |  12 +-
 scripts/ci/gitlab/pipeline/test.yml           |   1 +
 statement-table/src/generic.rs                |  17 +--
 tests/common.rs                               |   3 +-
 utils/staking-miner/src/opts.rs               |  26 +++--
 utils/staking-miner/src/rpc.rs                |   3 +-
 xcm/pallet-xcm-benchmarks/src/generic/mod.rs  |   9 +-
 xcm/pallet-xcm/src/lib.rs                     | 108 +++++++++--------
 xcm/src/double_encoded.rs                     |   8 +-
 xcm/src/lib.rs                                |  12 +-
 xcm/src/v2/junction.rs                        |  16 +--
 xcm/src/v2/mod.rs                             |  78 ++++++-------
 xcm/src/v2/multiasset.rs                      |  76 ++++++------
 xcm/src/v2/multilocation.rs                   |  29 ++---
 xcm/src/v2/traits.rs                          |  31 ++---
 xcm/src/v3/junction.rs                        |  39 ++++---
 xcm/src/v3/junctions.rs                       |  18 +--
 xcm/src/v3/mod.rs                             |  69 ++++++-----
 xcm/src/v3/multiasset.rs                      |  57 +++++----
 xcm/src/v3/multilocation.rs                   |  14 ++-
 xcm/src/v3/traits.rs                          |   9 +-
 xcm/xcm-builder/src/asset_conversion.rs       |   6 +-
 xcm/xcm-builder/src/currency_adapter.rs       |   4 +-
 xcm/xcm-builder/src/fungibles_adapter.rs      |   4 +-
 xcm/xcm-builder/src/location_conversion.rs    |   7 +-
 xcm/xcm-builder/src/origin_aliases.rs         |   3 +-
 xcm/xcm-builder/src/origin_conversion.rs      |  18 +--
 xcm/xcm-builder/src/tests/assets.rs           |   3 +-
 .../tests/bridging/paid_remote_relay_relay.rs |   6 +-
 xcm/xcm-builder/src/tests/mock.rs             |   4 +-
 xcm/xcm-builder/src/tests/querying.rs         |   3 +-
 xcm/xcm-builder/src/universal_exports.rs      |   8 +-
 xcm/xcm-builder/src/weight.rs                 |   5 +-
 xcm/xcm-builder/tests/scenarios.rs            |   4 +-
 xcm/xcm-executor/src/assets.rs                |  47 ++++----
 xcm/xcm-executor/src/lib.rs                   |  22 ++--
 xcm/xcm-executor/src/traits/asset_exchange.rs |   4 +-
 xcm/xcm-executor/src/traits/asset_lock.rs     |   4 +-
 xcm/xcm-executor/src/traits/conversion.rs     |   6 +-
 .../src/traits/filter_asset_location.rs       |   3 +-
 xcm/xcm-executor/src/traits/on_response.rs    |  12 +-
 xcm/xcm-executor/src/traits/should_execute.rs |   4 +-
 xcm/xcm-executor/src/traits/transact_asset.rs |  53 +++++----
 xcm/xcm-executor/src/traits/weight.rs         |   8 +-
 xcm/xcm-simulator/src/lib.rs                  |  26 ++---
 203 files changed, 1880 insertions(+), 1504 deletions(-)

diff --git a/.cargo/config.toml b/.cargo/config.toml
index 66b28b3485d8..4796a2c26965 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -29,4 +29,5 @@ rustflags = [
   "-Aclippy::needless_option_as_deref",  # false positives
   "-Aclippy::derivable_impls",           # false positives
   "-Aclippy::stable_sort_primitive",     # prefer stable sort
+  "-Aclippy::extra-unused-type-parameters", # stylistic
 ]
diff --git a/cli/src/cli.rs b/cli/src/cli.rs
index e78213cf11c8..c13340d91a04 100644
--- a/cli/src/cli.rs
+++ b/cli/src/cli.rs
@@ -130,8 +130,8 @@ pub struct RunCmd {
 	pub overseer_channel_capacity_override: Option<usize>,
 
 	/// Path to the directory where auxiliary worker binaries reside. If not specified, the main
-	/// binary's directory is searched first, then `/usr/lib/polkadot` is searched. TESTING ONLY: if
-	/// the path points to an executable rather then directory, that executable is used both as
+	/// binary's directory is searched first, then `/usr/lib/polkadot` is searched. TESTING ONLY:
+	/// if the path points to an executable rather then directory, that executable is used both as
 	/// preparation and execution worker.
 	#[arg(long, value_name = "PATH")]
 	pub workers_path: Option<PathBuf>,
diff --git a/cli/src/command.rs b/cli/src/command.rs
index c8e8673c6d70..c75f96ee2ebf 100644
--- a/cli/src/command.rs
+++ b/cli/src/command.rs
@@ -148,8 +148,8 @@ impl SubstrateCli for Cli {
 				let chain_spec = Box::new(service::PolkadotChainSpec::from_json_file(path.clone())?)
 					as Box<dyn service::ChainSpec>;
 
-				// When `force_*` is given or the file name starts with the name of one of the known chains,
-				// we use the chain spec for the specific chain.
+				// When `force_*` is given or the file name starts with the name of one of the known
+				// chains, we use the chain spec for the specific chain.
 				if self.run.force_rococo ||
 					chain_spec.is_rococo() ||
 					chain_spec.is_wococo() ||
diff --git a/core-primitives/src/lib.rs b/core-primitives/src/lib.rs
index 5e06966ecfee..aa01cf8dfc45 100644
--- a/core-primitives/src/lib.rs
+++ b/core-primitives/src/lib.rs
@@ -91,10 +91,10 @@ impl sp_std::fmt::Debug for CandidateHash {
 pub type Nonce = u32;
 
 /// The balance of an account.
-/// 128-bits (or 38 significant decimal figures) will allow for 10 m currency (`10^7`) at a resolution
-/// to all for one second's worth of an annualised 50% reward be paid to a unit holder (`10^11` unit
-/// denomination), or `10^18` total atomic units, to grow at 50%/year for 51 years (`10^9` multiplier)
-/// for an eventual total of `10^27` units (27 significant decimal figures).
+/// 128-bits (or 38 significant decimal figures) will allow for 10 m currency (`10^7`) at a
+/// resolution to all for one second's worth of an annualised 50% reward be paid to a unit holder
+/// (`10^11` unit denomination), or `10^18` total atomic units, to grow at 50%/year for 51 years
+/// (`10^9` multiplier) for an eventual total of `10^27` units (27 significant decimal figures).
 /// We round denomination to `10^12` (12 SDF), and leave the other redundancy at the upper end so
 /// that 32 bits may be multiplied with a balance in 128 bits without worrying about overflow.
 pub type Balance = u128;
@@ -121,8 +121,8 @@ pub type Remark = [u8; 32];
 /// The size of the message is limited by the `config.max_downward_message_size` parameter.
 pub type DownwardMessage = sp_std::vec::Vec<u8>;
 
-/// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number when
-/// the message was sent.
+/// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number
+/// when the message was sent.
 #[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, TypeInfo)]
 pub struct InboundDownwardMessage<BlockNumber = crate::BlockNumber> {
 	/// The block number at which these messages were put into the downward message queue.
diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs
index 02a0e8df8f61..8726ebf44c71 100644
--- a/node/collation-generation/src/lib.rs
+++ b/node/collation-generation/src/lib.rs
@@ -22,9 +22,11 @@
 //!
 //! * If there is no collation generation config, ignore.
 //! * Otherwise, for each `activated` head in the update:
-//!   * Determine if the para is scheduled on any core by fetching the `availability_cores` Runtime API.
+//!   * Determine if the para is scheduled on any core by fetching the `availability_cores` Runtime
+//!     API.
 //!   * Use the Runtime API subsystem to fetch the full validation data.
-//!   * Invoke the `collator`, and use its outputs to produce a [`CandidateReceipt`], signed with the configuration's `key`.
+//!   * Invoke the `collator`, and use its outputs to produce a [`CandidateReceipt`], signed with
+//!     the configuration's `key`.
 //!   * Dispatch a [`CollatorProtocolMessage::DistributeCollation`]`(receipt, pov)`.
 
 #![deny(missing_docs)]
@@ -77,8 +79,8 @@ impl CollationGenerationSubsystem {
 	/// Conceptually, this is very simple: it just loops forever.
 	///
 	/// - On incoming overseer messages, it starts or stops jobs as appropriate.
-	/// - On other incoming messages, if they can be converted into `Job::ToJob` and
-	///   include a hash, then they're forwarded to the appropriate individual job.
+	/// - On other incoming messages, if they can be converted into `Job::ToJob` and include a hash,
+	///   then they're forwarded to the appropriate individual job.
 	/// - On outgoing messages from the jobs, it forwards them to the overseer.
 	///
 	/// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur.
@@ -109,9 +111,10 @@ impl CollationGenerationSubsystem {
 	}
 
 	// handle an incoming message. return true if we should break afterwards.
-	// note: this doesn't strictly need to be a separate function; it's more an administrative function
-	// so that we don't clutter the run loop. It could in principle be inlined directly into there.
-	// it should hopefully therefore be ok that it's an async function mutably borrowing self.
+	// note: this doesn't strictly need to be a separate function; it's more an administrative
+	// function so that we don't clutter the run loop. It could in principle be inlined directly
+	// into there. it should hopefully therefore be ok that it's an async function mutably borrowing
+	// self.
 	async fn handle_incoming<Context>(
 		&mut self,
 		incoming: SubsystemResult<FromOrchestra<<Context as SubsystemContext>::Message>>,
@@ -319,8 +322,9 @@ async fn handle_new_activations<Context>(
 						// As long as `POV_BOMB_LIMIT` is at least `max_pov_size`, this ensures
 						// that honest collators never produce a PoV which is uncompressed.
 						//
-						// As such, honest collators never produce an uncompressed PoV which starts with
-						// a compression magic number, which would lead validators to reject the collation.
+						// As such, honest collators never produce an uncompressed PoV which starts
+						// with a compression magic number, which would lead validators to reject
+						// the collation.
 						if encoded_size > validation_data.max_pov_size as usize {
 							gum::debug!(
 								target: LOG_TARGET,
diff --git a/node/collation-generation/src/tests.rs b/node/collation-generation/src/tests.rs
index b2534bcf36c1..1c98e1450941 100644
--- a/node/collation-generation/src/tests.rs
+++ b/node/collation-generation/src/tests.rs
@@ -203,9 +203,9 @@ mod handle_new_activations {
 			.into_inner();
 
 		// the only activated hash should be from the 4 hash:
-		// each activated hash generates two scheduled cores: one with its value * 4, one with its value * 5
-		// given that the test configuration has a `para_id` of 16, there's only one way to get that value: with the 4
-		// hash.
+		// each activated hash generates two scheduled cores: one with its value * 4, one with its
+		// value * 5 given that the test configuration has a `para_id` of 16, there's only one way
+		// to get that value: with the 4 hash.
 		assert_eq!(requested_validation_data, vec![[4; 32].into()]);
 	}
 
@@ -301,8 +301,8 @@ mod handle_new_activations {
 			.into_inner();
 
 		// we expect a single message to be sent, containing a candidate receipt.
-		// we don't care too much about the `commitments_hash` right now, but let's ensure that we've calculated the
-		// correct descriptor
+		// we don't care too much about the `commitments_hash` right now, but let's ensure that
+		// we've calculated the correct descriptor
 		let expect_pov_hash =
 			test_collation_compressed().proof_of_validity.into_compressed().hash();
 		let expect_validation_data_hash = test_validation_data().hash();
diff --git a/node/core/approval-voting/src/approval_checking.rs b/node/core/approval-voting/src/approval_checking.rs
index bfecdba73f88..f345b57029b5 100644
--- a/node/core/approval-voting/src/approval_checking.rs
+++ b/node/core/approval-voting/src/approval_checking.rs
@@ -42,8 +42,8 @@ pub enum RequiredTranches {
 		/// assignments that are before the local time.
 		maximum_broadcast: DelayTranche,
 		/// The clock drift, in ticks, to apply to the local clock when determining whether
-		/// to broadcast an assignment or when to schedule a wakeup. The local clock should be treated
-		/// as though it is `clock_drift` ticks earlier.
+		/// to broadcast an assignment or when to schedule a wakeup. The local clock should be
+		/// treated as though it is `clock_drift` ticks earlier.
 		clock_drift: Tick,
 	},
 	/// An exact number of required tranches and a number of no-shows. This indicates that
@@ -55,8 +55,8 @@ pub enum RequiredTranches {
 		/// The amount of missing votes that should be tolerated.
 		tolerated_missing: usize,
 		/// When the next no-show would be, if any. This is used to schedule the next wakeup in the
-		/// event that there are some assignments that don't have corresponding approval votes. If this
-		/// is `None`, all assignments have approvals.
+		/// event that there are some assignments that don't have corresponding approval votes. If
+		/// this is `None`, all assignments have approvals.
 		next_no_show: Option<Tick>,
 		/// The last tick at which a needed assignment was received.
 		last_assignment_tick: Option<Tick>,
diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs
index 40a24e2dd937..0e1d18198c21 100644
--- a/node/core/approval-voting/src/criteria.rs
+++ b/node/core/approval-voting/src/criteria.rs
@@ -218,13 +218,14 @@ impl AssignmentCriteria for RealAssignmentCriteria {
 }
 
 /// Compute the assignments for a given block. Returns a map containing all assignments to cores in
-/// the block. If more than one assignment targets the given core, only the earliest assignment is kept.
+/// the block. If more than one assignment targets the given core, only the earliest assignment is
+/// kept.
 ///
-/// The `leaving_cores` parameter indicates all cores within the block where a candidate was included,
-/// as well as the group index backing those.
+/// The `leaving_cores` parameter indicates all cores within the block where a candidate was
+/// included, as well as the group index backing those.
 ///
-/// The current description of the protocol assigns every validator to check every core. But at different times.
-/// The idea is that most assignments are never triggered and fall by the wayside.
+/// The current description of the protocol assigns every validator to check every core. But at
+/// different times. The idea is that most assignments are never triggered and fall by the wayside.
 ///
 /// This will not assign to anything the local validator was part of the backing group for.
 pub(crate) fn compute_assignments(
@@ -463,8 +464,8 @@ pub(crate) enum InvalidAssignmentReason {
 ///   * Sample is out of bounds
 ///   * Validator is present in backing group.
 ///
-/// This function does not check whether the core is actually a valid assignment or not. That should be done
-/// outside the scope of this function.
+/// This function does not check whether the core is actually a valid assignment or not. That should
+/// be done outside the scope of this function.
 pub(crate) fn check_assignment_cert(
 	claimed_core_index: CoreIndex,
 	validator_index: ValidatorIndex,
diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs
index e33caed49c5f..c504ba71b3c2 100644
--- a/node/core/approval-voting/src/import.rs
+++ b/node/core/approval-voting/src/import.rs
@@ -104,7 +104,8 @@ enum ImportedBlockInfoError {
 	VrfInfoUnavailable,
 }
 
-/// Computes information about the imported block. Returns an error if the info couldn't be extracted.
+/// Computes information about the imported block. Returns an error if the info couldn't be
+/// extracted.
 #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
 async fn imported_block_info<Context>(
 	ctx: &mut Context,
@@ -181,20 +182,21 @@ async fn imported_block_info<Context>(
 		// It's not obvious whether to use the hash or the parent hash for this, intuitively. We
 		// want to use the block hash itself, and here's why:
 		//
-		// First off, 'epoch' in BABE means 'session' in other places. 'epoch' is the terminology from
-		// the paper, which we fulfill using 'session's, which are a Substrate consensus concept.
+		// First off, 'epoch' in BABE means 'session' in other places. 'epoch' is the terminology
+		// from the paper, which we fulfill using 'session's, which are a Substrate consensus
+		// concept.
 		//
-		// In BABE, the on-chain and off-chain view of the current epoch can differ at epoch boundaries
-		// because epochs change precisely at a slot. When a block triggers a new epoch, the state of
-		// its parent will still have the old epoch. Conversely, we have the invariant that every
-		// block in BABE has the epoch _it was authored in_ within its post-state. So we use the
-		// block, and not its parent.
+		// In BABE, the on-chain and off-chain view of the current epoch can differ at epoch
+		// boundaries because epochs change precisely at a slot. When a block triggers a new epoch,
+		// the state of its parent will still have the old epoch. Conversely, we have the invariant
+		// that every block in BABE has the epoch _it was authored in_ within its post-state. So we
+		// use the block, and not its parent.
 		//
-		// It's worth nothing that Polkadot session changes, at least for the purposes of parachains,
-		// would function the same way, except for the fact that they're always delayed by one block.
-		// This gives us the opposite invariant for sessions - the parent block's post-state gives
-		// us the canonical information about the session index for any of its children, regardless
-		// of which slot number they might be produced at.
+		// It's worth nothing that Polkadot session changes, at least for the purposes of
+		// parachains, would function the same way, except for the fact that they're always delayed
+		// by one block. This gives us the opposite invariant for sessions - the parent block's
+		// post-state gives us the canonical information about the session index for any of its
+		// children, regardless of which slot number they might be produced at.
 		ctx.send_message(RuntimeApiMessage::Request(
 			block_hash,
 			RuntimeApiRequest::CurrentBabeEpoch(s_tx),
diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs
index 05b92f459529..7e29e64c400a 100644
--- a/node/core/approval-voting/src/lib.rs
+++ b/node/core/approval-voting/src/lib.rs
@@ -1232,8 +1232,8 @@ async fn handle_from_overseer<Context>(
 									);
 
 									// Our first wakeup will just be the tranche of our assignment,
-									// if any. This will likely be superseded by incoming assignments
-									// and approvals which trigger rescheduling.
+									// if any. This will likely be superseded by incoming
+									// assignments and approvals which trigger rescheduling.
 									actions.push(Action::ScheduleWakeup {
 										block_hash: block_batch.block_hash,
 										block_number: block_batch.block_number,
@@ -1256,12 +1256,14 @@ async fn handle_from_overseer<Context>(
 			crate::ops::canonicalize(db, block_number, block_hash)
 				.map_err(|e| SubsystemError::with_origin("db", e))?;
 
-			// `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans accordingly.
+			// `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans
+			// accordingly.
 			wakeups.prune_finalized_wakeups(block_number, &mut state.spans);
 
-			// // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans accordingly.
-			// let hash_set = wakeups.block_numbers.values().flatten().collect::<HashSet<_>>();
-			// state.spans.retain(|hash, _| hash_set.contains(hash));
+			// // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans
+			// accordingly. let hash_set =
+			// wakeups.block_numbers.values().flatten().collect::<HashSet<_>>(); state.spans.
+			// retain(|hash, _| hash_set.contains(hash));
 
 			Vec::new()
 		},
@@ -1403,8 +1405,8 @@ async fn get_approval_signatures_for_candidate<Context>(
 			tx_distribution,
 		));
 
-		// Because of the unbounded sending and the nature of the call (just fetching data from state),
-		// this should not block long:
+		// Because of the unbounded sending and the nature of the call (just fetching data from
+		// state), this should not block long:
 		match rx_distribution.timeout(WAIT_FOR_SIGS_TIMEOUT).await {
 			None => {
 				gum::warn!(
@@ -2117,9 +2119,10 @@ impl ApprovalStateTransition {
 	}
 }
 
-// Advance the approval state, either by importing an approval vote which is already checked to be valid and corresponding to an assigned
-// validator on the candidate and block, or by noting that there are no further wakeups or tranches needed. This updates the block entry and candidate entry as
-// necessary and schedules any further wakeups.
+// Advance the approval state, either by importing an approval vote which is already checked to be
+// valid and corresponding to an assigned validator on the candidate and block, or by noting that
+// there are no further wakeups or tranches needed. This updates the block entry and candidate entry
+// as necessary and schedules any further wakeups.
 async fn advance_approval_state<Sender>(
 	sender: &mut Sender,
 	state: &State,
@@ -2251,7 +2254,8 @@ where
 		// 1. This is not a local approval, as we don't store anything new in the approval entry.
 		// 2. The candidate is not newly approved, as we haven't altered the approval entry's
 		//	  approved flag with `mark_approved` above.
-		// 3. The approver, if any, had already approved the candidate, as we haven't altered the bitfield.
+		// 3. The approver, if any, had already approved the candidate, as we haven't altered the
+		// bitfield.
 		if transition.is_local_approval() || newly_approved || !already_approved_by.unwrap_or(true)
 		{
 			// In all other cases, we need to write the candidate entry.
@@ -2279,7 +2283,8 @@ fn should_trigger_assignment(
 					&approval_entry,
 					RequiredTranches::All,
 				)
-				.is_approved(Tick::max_value()), // when all are required, we are just waiting for the first 1/3+
+				// when all are required, we are just waiting for the first 1/3+
+				.is_approved(Tick::max_value()),
 				RequiredTranches::Pending { maximum_broadcast, clock_drift, .. } => {
 					let drifted_tranche_now =
 						tranche_now.saturating_sub(clock_drift as DelayTranche);
@@ -2615,8 +2620,8 @@ async fn launch_approval<Context>(
 		match val_rx.await {
 			Err(_) => return ApprovalState::failed(validator_index, candidate_hash),
 			Ok(Ok(ValidationResult::Valid(_, _))) => {
-				// Validation checked out. Issue an approval command. If the underlying service is unreachable,
-				// then there isn't anything we can do.
+				// Validation checked out. Issue an approval command. If the underlying service is
+				// unreachable, then there isn't anything we can do.
 
 				gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Candidate Valid");
 
diff --git a/node/core/approval-voting/src/ops.rs b/node/core/approval-voting/src/ops.rs
index 4d6dc5e7ad66..6f57b2f80e8a 100644
--- a/node/core/approval-voting/src/ops.rs
+++ b/node/core/approval-voting/src/ops.rs
@@ -161,7 +161,8 @@ pub fn canonicalize(
 		}
 	}
 
-	// Update all blocks-at-height keys, deleting all those which now have empty `block_assignments`.
+	// Update all blocks-at-height keys, deleting all those which now have empty
+	// `block_assignments`.
 	for (h, at) in visited_heights.into_iter() {
 		if at.is_empty() {
 			overlay_db.delete_blocks_at_height(h);
@@ -170,8 +171,8 @@ pub fn canonicalize(
 		}
 	}
 
-	// due to the fork pruning, this range actually might go too far above where our actual highest block is,
-	// if a relatively short fork is canonicalized.
+	// due to the fork pruning, this range actually might go too far above where our actual highest
+	// block is, if a relatively short fork is canonicalized.
 	// TODO https://github.com/paritytech/polkadot/issues/3389
 	let new_range = StoredBlockRange(canon_number + 1, std::cmp::max(range.1, canon_number + 2));
 
diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs
index 675d41b79c06..ef7dcecac075 100644
--- a/node/core/av-store/src/lib.rs
+++ b/node/core/av-store/src/lib.rs
@@ -67,8 +67,8 @@ const META_PREFIX: &[u8; 4] = b"meta";
 const UNFINALIZED_PREFIX: &[u8; 11] = b"unfinalized";
 const PRUNE_BY_TIME_PREFIX: &[u8; 13] = b"prune_by_time";
 
-// We have some keys we want to map to empty values because existence of the key is enough. We use this because
-// rocksdb doesn't support empty values.
+// We have some keys we want to map to empty values because existence of the key is enough. We use
+// this because rocksdb doesn't support empty values.
 const TOMBSTONE_VALUE: &[u8] = b" ";
 
 /// Unavailable blocks are kept for 1 hour.
@@ -139,10 +139,11 @@ enum State {
 	/// Candidate data was first observed at the given time but is not available in any block.
 	#[codec(index = 0)]
 	Unavailable(BETimestamp),
-	/// The candidate was first observed at the given time and was included in the given list of unfinalized blocks, which may be
-	/// empty. The timestamp here is not used for pruning. Either one of these blocks will be finalized or the state will regress to
-	/// `State::Unavailable`, in which case the same timestamp will be reused. Blocks are sorted ascending first by block number and
-	/// then hash.
+	/// The candidate was first observed at the given time and was included in the given list of
+	/// unfinalized blocks, which may be empty. The timestamp here is not used for pruning. Either
+	/// one of these blocks will be finalized or the state will regress to `State::Unavailable`, in
+	/// which case the same timestamp will be reused. Blocks are sorted ascending first by block
+	/// number and then hash.
 	#[codec(index = 1)]
 	Unfinalized(BETimestamp, Vec<(BEBlockNumber, Hash)>),
 	/// Candidate data has appeared in a finalized block and did so at the given time.
@@ -820,8 +821,8 @@ fn note_block_included(
 
 	match load_meta(db, config, &candidate_hash)? {
 		None => {
-			// This is alarming. We've observed a block being included without ever seeing it backed.
-			// Warn and ignore.
+			// This is alarming. We've observed a block being included without ever seeing it
+			// backed. Warn and ignore.
 			gum::warn!(
 				target: LOG_TARGET,
 				?candidate_hash,
@@ -894,9 +895,9 @@ async fn process_block_finalized<Context>(
 		let mut db_transaction = DBTransaction::new();
 		let (start_prefix, end_prefix) = finalized_block_range(finalized_number);
 
-		// We have to do some juggling here of the `iter` to make sure it doesn't cross the `.await` boundary
-		// as it is not `Send`. That is why we create the iterator once within this loop, drop it,
-		// do an asynchronous request, and then instantiate the exact same iterator again.
+		// We have to do some juggling here of the `iter` to make sure it doesn't cross the `.await`
+		// boundary as it is not `Send`. That is why we create the iterator once within this loop,
+		// drop it, do an asynchronous request, and then instantiate the exact same iterator again.
 		let batch_num = {
 			let mut iter = subsystem
 				.db
@@ -961,8 +962,9 @@ async fn process_block_finalized<Context>(
 
 		update_blocks_at_finalized_height(&subsystem, &mut db_transaction, batch, batch_num, now)?;
 
-		// We need to write at the end of the loop so the prefix iterator doesn't pick up the same values again
-		// in the next iteration. Another unfortunate effect of having to re-initialize the iterator.
+		// We need to write at the end of the loop so the prefix iterator doesn't pick up the same
+		// values again in the next iteration. Another unfortunate effect of having to re-initialize
+		// the iterator.
 		subsystem.db.write(db_transaction)?;
 	}
 
@@ -1215,7 +1217,8 @@ fn process_message(
 					// We do not bubble up internal errors to caller subsystems, instead the
 					// tx channel is dropped and that error is caught by the caller subsystem.
 					//
-					// We bubble up the specific error here so `av-store` logs still tell what happend.
+					// We bubble up the specific error here so `av-store` logs still tell what
+					// happened.
 					return Err(e.into())
 				},
 			}
@@ -1298,8 +1301,8 @@ fn store_available_data(
 		.with_candidate(candidate_hash)
 		.with_pov(&available_data.pov);
 
-	// Important note: This check below is critical for consensus and the `backing` subsystem relies on it to
-	// ensure candidate validity.
+	// Important note: This check below is critical for consensus and the `backing` subsystem relies
+	// on it to ensure candidate validity.
 	let chunks = erasure::obtain_chunks_v1(n_validators, &available_data)?;
 	let branches = erasure::branches(chunks.as_ref());
 
diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs
index dc0863cfa0b3..0abfbfad7657 100644
--- a/node/core/backing/src/lib.rs
+++ b/node/core/backing/src/lib.rs
@@ -422,7 +422,8 @@ struct CandidateBackingJob<Context> {
 	awaiting_validation: HashSet<CandidateHash>,
 	/// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`.
 	fallbacks: HashMap<CandidateHash, (AttestingData, Option<jaeger::Span>)>,
-	/// `Some(h)` if this job has already issued `Seconded` statement for some candidate with `h` hash.
+	/// `Some(h)` if this job has already issued `Seconded` statement for some candidate with `h`
+	/// hash.
 	seconded: Option<CandidateHash>,
 	/// The candidates that are includable, by hash. Each entry here indicates
 	/// that we've sent the provisioner the backed candidate.
@@ -562,9 +563,10 @@ async fn store_available_data(
 	expected_erasure_root: Hash,
 ) -> Result<(), Error> {
 	let (tx, rx) = oneshot::channel();
-	// Important: the `av-store` subsystem will check if the erasure root of the `available_data` matches `expected_erasure_root`
-	// which was provided by the collator in the `CandidateReceipt`. This check is consensus critical and the `backing` subsystem
-	// relies on it for ensuring candidate validity.
+	// Important: the `av-store` subsystem will check if the erasure root of the `available_data`
+	// matches `expected_erasure_root` which was provided by the collator in the `CandidateReceipt`.
+	// This check is consensus critical and the `backing` subsystem relies on it for ensuring
+	// candidate validity.
 	sender
 		.send_message(AvailabilityStoreMessage::StoreAvailableData {
 			candidate_hash,
@@ -582,8 +584,8 @@ async fn store_available_data(
 
 // Make a `PoV` available.
 //
-// This calls the AV store to write the available data to storage. The AV store also checks the erasure root matches
-// the `expected_erasure_root`.
+// This calls the AV store to write the available data to storage. The AV store also checks the
+// erasure root matches the `expected_erasure_root`.
 // This returns `Err()` on erasure root mismatch or due to any AV store subsystem error.
 //
 // Otherwise, it returns either `Ok(())`
diff --git a/node/core/backing/src/metrics.rs b/node/core/backing/src/metrics.rs
index 8468ea005404..77f0e7f9d92a 100644
--- a/node/core/backing/src/metrics.rs
+++ b/node/core/backing/src/metrics.rs
@@ -54,7 +54,8 @@ impl Metrics {
 		self.0.as_ref().map(|metrics| metrics.process_statement.start_timer())
 	}
 
-	/// Provide a timer for handling `CandidateBackingMessage::GetBackedCandidates` which observes on drop.
+	/// Provide a timer for handling `CandidateBackingMessage::GetBackedCandidates` which observes
+	/// on drop.
 	pub fn time_get_backed_candidates(
 		&self,
 	) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests.rs
index 35c83297fa71..386cc9e2279e 100644
--- a/node/core/backing/src/tests.rs
+++ b/node/core/backing/src/tests.rs
@@ -84,7 +84,8 @@ impl Default for TestState {
 		];
 
 		let keystore = Arc::new(sc_keystore::LocalKeystore::in_memory());
-		// Make sure `Alice` key is in the keystore, so this mocked node will be a parachain validator.
+		// Make sure `Alice` key is in the keystore, so this mocked node will be a parachain
+		// validator.
 		Keystore::sr25519_generate_new(&*keystore, ValidatorId::ID, Some(&validators[0].to_seed()))
 			.expect("Insert key into keystore");
 
diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs
index 1e4d556de7ca..f29e827e1090 100644
--- a/node/core/bitfield-signing/src/lib.rs
+++ b/node/core/bitfield-signing/src/lib.rs
@@ -137,8 +137,8 @@ async fn get_availability_cores(
 
 /// - get the list of core states from the runtime
 /// - for each core, concurrently determine chunk availability (see `get_core_availability`)
-/// - return the bitfield if there were no errors at any point in this process
-///   (otherwise, it's prone to false negatives)
+/// - return the bitfield if there were no errors at any point in this process (otherwise, it's
+///   prone to false negatives)
 async fn construct_availability_bitfield(
 	relay_parent: Hash,
 	span: &jaeger::Span,
diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs
index 93a7e05c8724..f53f2a6aee06 100644
--- a/node/core/candidate-validation/src/lib.rs
+++ b/node/core/candidate-validation/src/lib.rs
@@ -67,15 +67,15 @@ mod tests;
 
 const LOG_TARGET: &'static str = "parachain::candidate-validation";
 
-/// The amount of time to wait before retrying after a retry-able backing validation error. We use a lower value for the
-/// backing case, to fit within the lower backing timeout.
+/// The amount of time to wait before retrying after a retry-able backing validation error. We use a
+/// lower value for the backing case, to fit within the lower backing timeout.
 #[cfg(not(test))]
 const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(500);
 #[cfg(test)]
 const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(200);
-/// The amount of time to wait before retrying after a retry-able approval validation error. We use a higher value for
-/// the approval case since we have more time, and if we wait longer it is more likely that transient conditions will
-/// resolve.
+/// The amount of time to wait before retrying after a retry-able approval validation error. We use
+/// a higher value for the approval case since we have more time, and if we wait longer it is more
+/// likely that transient conditions will resolve.
 #[cfg(not(test))]
 const PVF_APPROVAL_EXECUTION_RETRY_DELAY: Duration = Duration::from_secs(3);
 #[cfg(test)]
@@ -451,9 +451,9 @@ where
 	const ASSUMPTIONS: &[OccupiedCoreAssumption] = &[
 		OccupiedCoreAssumption::Included,
 		OccupiedCoreAssumption::TimedOut,
-		// `TimedOut` and `Free` both don't perform any speculation and therefore should be the same
-		// for our purposes here. In other words, if `TimedOut` matched then the `Free` must be
-		// matched as well.
+		// `TimedOut` and `Free` both don't perform any speculation and therefore should be the
+		// same for our purposes here. In other words, if `TimedOut` matched then the `Free` must
+		// be matched as well.
 	];
 
 	// Consider running these checks in parallel to reduce validation latency.
@@ -482,9 +482,10 @@ where
 		AssumptionCheckOutcome::Matches(validation_data, validation_code) =>
 			Ok(Some((validation_data, validation_code))),
 		AssumptionCheckOutcome::DoesNotMatch => {
-			// If neither the assumption of the occupied core having the para included or the assumption
-			// of the occupied core timing out are valid, then the persisted_validation_data_hash in the descriptor
-			// is not based on the relay parent and is thus invalid.
+			// If neither the assumption of the occupied core having the para included or the
+			// assumption of the occupied core timing out are valid, then the
+			// persisted_validation_data_hash in the descriptor is not based on the relay parent and
+			// is thus invalid.
 			Ok(None)
 		},
 		AssumptionCheckOutcome::BadRequest =>
@@ -704,7 +705,8 @@ where
 						"Invalid candidate (commitments hash)"
 					);
 
-					// If validation produced a new set of commitments, we treat the candidate as invalid.
+					// If validation produced a new set of commitments, we treat the candidate as
+					// invalid.
 					Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))
 				} else {
 					Ok(ValidationResult::Valid(outputs, persisted_validation_data))
@@ -744,7 +746,8 @@ trait ValidationBackend {
 			prep_timeout,
 			PrepareJobKind::Compilation,
 		);
-		// We keep track of the total time that has passed and stop retrying if we are taking too long.
+		// We keep track of the total time that has passed and stop retrying if we are taking too
+		// long.
 		let total_time_start = Instant::now();
 
 		let mut validation_result =
@@ -780,8 +783,8 @@ trait ValidationBackend {
 				_ => break,
 			}
 
-			// If we got a possibly transient error, retry once after a brief delay, on the assumption
-			// that the conditions that caused this error may have resolved on their own.
+			// If we got a possibly transient error, retry once after a brief delay, on the
+			// assumption that the conditions that caused this error may have resolved on their own.
 			{
 				// Wait a brief delay before retrying.
 				futures_timer::Delay::new(retry_delay).await;
diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs
index 4b512347dae4..aa5bb9548ad2 100644
--- a/node/core/chain-selection/src/lib.rs
+++ b/node/core/chain-selection/src/lib.rs
@@ -44,13 +44,15 @@ mod tree;
 mod tests;
 
 const LOG_TARGET: &str = "parachain::chain-selection";
-/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots.
+/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS
+/// reboots.
 type Timestamp = u64;
 
 // If a block isn't approved in 120 seconds, nodes will abandon it
 // and begin building on another chain.
 const STAGNANT_TIMEOUT: Timestamp = 120;
-// Delay prunning of the stagnant keys in prune only mode by 25 hours to avoid interception with the finality
+// Delay pruning of the stagnant keys in prune only mode by 25 hours to avoid interception with the
+// finality
 const STAGNANT_PRUNE_DELAY: Timestamp = 25 * 60 * 60;
 // Maximum number of stagnant entries cleaned during one `STAGNANT_TIMEOUT` iteration
 const MAX_STAGNANT_ENTRIES: usize = 1000;
diff --git a/node/core/dispute-coordinator/src/db/v1.rs b/node/core/dispute-coordinator/src/db/v1.rs
index 2d14f5151003..f0f17d2325d6 100644
--- a/node/core/dispute-coordinator/src/db/v1.rs
+++ b/node/core/dispute-coordinator/src/db/v1.rs
@@ -52,8 +52,8 @@ const CLEANED_VOTES_WATERMARK_KEY: &[u8; 23] = b"cleaned-votes-watermark";
 /// this should not be done at once, but rather in smaller batches so nodes won't get stalled by
 /// this.
 ///
-/// 300 is with session duration of 1 hour and 30 parachains around <3_000_000 key purges in the worst
-/// case. Which is already quite a lot, at the same time we have around 21_000 sessions on
+/// 300 is with session duration of 1 hour and 30 parachains around <3_000_000 key purges in the
+/// worst case. Which is already quite a lot, at the same time we have around 21_000 sessions on
 /// Kusama. This means at 300 purged sessions per session, cleaning everything up will take
 /// around 3 days. Depending on how severe disk usage becomes, we might want to bump the batch
 /// size, at the cost of risking issues at session boundaries (performance).
@@ -346,7 +346,8 @@ pub(crate) fn note_earliest_session(
 
 				if pruned_disputes.len() != 0 {
 					overlay_db.write_recent_disputes(new_recent_disputes);
-					// Note: Deleting old candidate votes is handled in `write` based on the earliest session.
+					// Note: Deleting old candidate votes is handled in `write` based on the
+					// earliest session.
 				}
 			}
 		},
diff --git a/node/core/dispute-coordinator/src/import.rs b/node/core/dispute-coordinator/src/import.rs
index 912521834075..0da3723ebf22 100644
--- a/node/core/dispute-coordinator/src/import.rs
+++ b/node/core/dispute-coordinator/src/import.rs
@@ -19,12 +19,12 @@
 //! This module encapsulates the actual logic for importing new votes and provides easy access of
 //! the current state for votes for a particular candidate.
 //!
-//! In particular there is `CandidateVoteState` which tells what can be concluded for a particular set of
-//! votes. E.g. whether a dispute is ongoing, whether it is confirmed, concluded, ..
+//! In particular there is `CandidateVoteState` which tells what can be concluded for a particular
+//! set of votes. E.g. whether a dispute is ongoing, whether it is confirmed, concluded, ..
 //!
 //! Then there is `ImportResult` which reveals information about what changed once additional votes
-//! got imported on top of an existing `CandidateVoteState` and reveals "dynamic" information, like whether
-//! due to the import a dispute was raised/got confirmed, ...
+//! got imported on top of an existing `CandidateVoteState` and reveals "dynamic" information, like
+//! whether due to the import a dispute was raised/got confirmed, ...
 
 use std::collections::{BTreeMap, HashMap, HashSet};
 
diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs
index 2a1d8fd4b83c..c1d02ef976cb 100644
--- a/node/core/dispute-coordinator/src/initialized.rs
+++ b/node/core/dispute-coordinator/src/initialized.rs
@@ -92,8 +92,8 @@ pub struct InitialData {
 pub(crate) struct Initialized {
 	keystore: Arc<LocalKeystore>,
 	runtime_info: RuntimeInfo,
-	/// This is the highest `SessionIndex` seen via `ActiveLeavesUpdate`. It doesn't matter if it was
-	/// cached successfully or not. It is used to detect ancient disputes.
+	/// This is the highest `SessionIndex` seen via `ActiveLeavesUpdate`. It doesn't matter if it
+	/// was cached successfully or not. It is used to detect ancient disputes.
 	highest_session_seen: SessionIndex,
 	/// Will be set to `true` if an error occured during the last caching attempt
 	gaps_in_cache: bool,
@@ -308,8 +308,8 @@ impl Initialized {
 				Ok(session_idx)
 					if self.gaps_in_cache || session_idx > self.highest_session_seen =>
 				{
-					// Fetch the last `DISPUTE_WINDOW` number of sessions unless there are no gaps in
-					// cache and we are not missing too many `SessionInfo`s
+					// Fetch the last `DISPUTE_WINDOW` number of sessions unless there are no gaps
+					// in cache and we are not missing too many `SessionInfo`s
 					let mut lower_bound = session_idx.saturating_sub(DISPUTE_WINDOW.get() - 1);
 					if !self.gaps_in_cache && self.highest_session_seen > lower_bound {
 						lower_bound = self.highest_session_seen + 1
@@ -1133,8 +1133,8 @@ impl Initialized {
 		}
 
 		// Participate in dispute if we did not cast a vote before and actually have keys to cast a
-		// local vote. Disputes should fall in one of the categories below, otherwise we will refrain
-		// from participation:
+		// local vote. Disputes should fall in one of the categories below, otherwise we will
+		// refrain from participation:
 		// - `is_included` lands in prioritised queue
 		// - `is_confirmed` | `is_backed` lands in best effort queue
 		// We don't participate in disputes on finalized candidates.
diff --git a/node/core/dispute-coordinator/src/lib.rs b/node/core/dispute-coordinator/src/lib.rs
index 02bb6ef9ecda..a2c500e08e28 100644
--- a/node/core/dispute-coordinator/src/lib.rs
+++ b/node/core/dispute-coordinator/src/lib.rs
@@ -17,12 +17,13 @@
 //! Implements the dispute coordinator subsystem.
 //!
 //! This is the central subsystem of the node-side components which participate in disputes.
-//! This subsystem wraps a database which tracks all statements observed by all validators over some window of sessions.
-//! Votes older than this session window are pruned.
+//! This subsystem wraps a database which tracks all statements observed by all validators over some
+//! window of sessions. Votes older than this session window are pruned.
 //!
-//! This subsystem will be the point which produce dispute votes, either positive or negative, based on locally-observed
-//! validation results as well as a sink for votes received by other subsystems. When importing a dispute vote from
-//! another node, this will trigger dispute participation to recover and validate the block.
+//! This subsystem will be the point that produces dispute votes, either positive or negative, based
+//! on locally-observed validation results as well as a sink for votes received by other subsystems.
+//! When importing a dispute vote from another node, this will trigger dispute participation to
+//! recover and validate the block.
 
 use std::{num::NonZeroUsize, sync::Arc};
 
@@ -92,10 +93,10 @@ mod spam_slots;
 
 /// Handling of participation requests via `Participation`.
 ///
-/// `Participation` provides an API (`Participation::queue_participation`) for queuing of dispute participations and will process those
-/// participation requests, such that most important/urgent disputes will be resolved and processed
-/// first and more importantly it will order requests in a way so disputes will get resolved, even
-/// if there are lots of them.
+/// `Participation` provides an API (`Participation::queue_participation`) for queuing of dispute
+/// participations and will process those participation requests, such that most important/urgent
+/// disputes will be resolved and processed first and more importantly it will order requests in a
+/// way so disputes will get resolved, even if there are lots of them.
 pub(crate) mod participation;
 
 /// Pure processing of vote imports.
diff --git a/node/core/dispute-coordinator/src/participation/queues/mod.rs b/node/core/dispute-coordinator/src/participation/queues/mod.rs
index 4d8ee585ea29..8a4374999f88 100644
--- a/node/core/dispute-coordinator/src/participation/queues/mod.rs
+++ b/node/core/dispute-coordinator/src/participation/queues/mod.rs
@@ -294,8 +294,8 @@ impl Queues {
 		return Self::pop_impl(&mut self.priority)
 	}
 
-	// `pop_best_effort` and `pop_priority` do the same but on different `BTreeMap`s. This function has
-	// the extracted implementation
+	// `pop_best_effort` and `pop_priority` do the same but on different `BTreeMap`s. This function
+	// has the extracted implementation
 	fn pop_impl(
 		target: &mut BTreeMap<CandidateComparator, ParticipationRequest>,
 	) -> Option<(CandidateComparator, ParticipationRequest)> {
@@ -331,9 +331,10 @@ impl Queues {
 #[derive(Copy, Clone)]
 #[cfg_attr(test, derive(Debug))]
 struct CandidateComparator {
-	/// Block number of the relay parent. It's wrapped in an `Option<>` because there are cases when
-	/// it can't be obtained. For example when the node is lagging behind and new leaves are received
-	/// with a slight delay. Candidates with unknown relay parent are treated with the lowest priority.
+	/// Block number of the relay parent. It's wrapped in an `Option<>` because there are cases
+	/// when it can't be obtained. For example when the node is lagging behind and new leaves are
+	/// received with a slight delay. Candidates with unknown relay parent are treated with the
+	/// lowest priority.
 	///
 	/// The order enforced by `CandidateComparator` is important because we want to participate in
 	/// the oldest disputes first.
@@ -346,9 +347,10 @@ struct CandidateComparator {
 	/// that is not stable. If a new fork appears after the fact, we would start ordering the same
 	/// candidate differently, which would result in the same candidate getting queued twice.
 	relay_parent_block_number: Option<BlockNumber>,
-	/// By adding the `CandidateHash`, we can guarantee a unique ordering across candidates with the
-	/// same relay parent block number. Candidates without `relay_parent_block_number` are ordered by
-	/// the `candidate_hash` (and treated with the lowest priority, as already mentioned).
+	/// By adding the `CandidateHash`, we can guarantee a unique ordering across candidates with
+	/// the same relay parent block number. Candidates without `relay_parent_block_number` are
+	/// ordered by the `candidate_hash` (and treated with the lowest priority, as already
+	/// mentioned).
 	candidate_hash: CandidateHash,
 }
 
@@ -364,11 +366,11 @@ impl CandidateComparator {
 	/// Create a candidate comparator for a given candidate.
 	///
 	/// Returns:
-	///	- `Ok(CandidateComparator{Some(relay_parent_block_number), candidate_hash})` when the
+	/// 	- `Ok(CandidateComparator{Some(relay_parent_block_number), candidate_hash})` when the
 	/// 	relay parent can be obtained. This is the happy case.
 	/// - `Ok(CandidateComparator{None, candidate_hash})` in case the candidate's relay parent
 	/// 	can't be obtained.
-	///	- `FatalError` in case the chain API call fails with an unexpected error.
+	/// 	- `FatalError` in case the chain API call fails with an unexpected error.
 	pub async fn new(
 		sender: &mut impl overseer::DisputeCoordinatorSenderTrait,
 		candidate: &CandidateReceipt,
diff --git a/node/core/dispute-coordinator/src/participation/queues/tests.rs b/node/core/dispute-coordinator/src/participation/queues/tests.rs
index 8293a935d11a..5e262d895e31 100644
--- a/node/core/dispute-coordinator/src/participation/queues/tests.rs
+++ b/node/core/dispute-coordinator/src/participation/queues/tests.rs
@@ -53,8 +53,8 @@ fn clone_request(request: &ParticipationRequest) -> ParticipationRequest {
 /// Check that dequeuing acknowledges order.
 ///
 /// Any priority item will be dequeued before any best effort items, priority and best effort with
-/// known parent block number items will be processed in order. Best effort items without known parent
-/// block number should be treated with lowest priority.
+/// known parent block number items will be processed in order. Best effort items without known
+/// parent block number should be treated with lowest priority.
 #[test]
 fn ordering_works_as_expected() {
 	let metrics = Metrics::default();
diff --git a/node/core/dispute-coordinator/src/participation/tests.rs b/node/core/dispute-coordinator/src/participation/tests.rs
index ab58db4e7628..32725a3ac658 100644
--- a/node/core/dispute-coordinator/src/participation/tests.rs
+++ b/node/core/dispute-coordinator/src/participation/tests.rs
@@ -305,7 +305,8 @@ fn reqs_get_queued_on_no_recent_block() {
 
 	// Responds to messages from the test and verifies its behaviour
 	let request_handler = async {
-		// If we receive `BlockNumber` request this implicitly proves that the participation is queued
+		// If we receive `BlockNumber` request this implicitly proves that the participation is
+		// queued
 		assert_matches!(
 			ctx_handle.recv().await,
 			AllMessages::ChainApi(ChainApiMessage::BlockNumber(_, tx)) => {
diff --git a/node/core/dispute-coordinator/src/scraping/candidates.rs b/node/core/dispute-coordinator/src/scraping/candidates.rs
index 89323907a732..38956700545c 100644
--- a/node/core/dispute-coordinator/src/scraping/candidates.rs
+++ b/node/core/dispute-coordinator/src/scraping/candidates.rs
@@ -98,7 +98,8 @@ mod ref_counted_candidates_tests {
 /// Keeps track of scraped candidates. Supports `insert`, `remove_up_to_height` and `contains`
 /// operations.
 pub struct ScrapedCandidates {
-	/// Main data structure which keeps the candidates we know about. `contains` does lookups only here.
+	/// Main data structure which keeps the candidates we know about. `contains` does lookups only
+	/// here.
 	candidates: RefCountedCandidates,
 	/// Keeps track at which block number a candidate was inserted. Used in `remove_up_to_height`.
 	/// Without this tracking we won't be able to remove all candidates before block X.
@@ -117,7 +118,8 @@ impl ScrapedCandidates {
 		self.candidates.contains(candidate_hash)
 	}
 
-	// Removes all candidates up to a given height. The candidates at the block height are NOT removed.
+	// Removes all candidates up to a given height. The candidates at the block height are NOT
+	// removed.
 	pub fn remove_up_to_height(&mut self, height: &BlockNumber) -> HashSet<CandidateHash> {
 		let mut candidates_modified: HashSet<CandidateHash> = HashSet::new();
 		let not_stale = self.candidates_by_block_number.split_off(&height);
diff --git a/node/core/dispute-coordinator/src/scraping/mod.rs b/node/core/dispute-coordinator/src/scraping/mod.rs
index a1e385b5ff85..f93ad0abab91 100644
--- a/node/core/dispute-coordinator/src/scraping/mod.rs
+++ b/node/core/dispute-coordinator/src/scraping/mod.rs
@@ -120,7 +120,8 @@ impl Inclusions {
 	) {
 		for candidate in candidates_modified {
 			if let Some(blocks_including) = self.inclusions_inner.get_mut(&candidate) {
-				// Returns everything after the given key, including the key. This works because the blocks are sorted in ascending order.
+				// Returns everything after the given key, including the key. This works because the
+				// blocks are sorted in ascending order.
 				*blocks_including = blocks_including.split_off(height);
 			}
 		}
@@ -150,8 +151,8 @@ impl Inclusions {
 ///
 /// Concretely:
 ///
-/// - Monitors for `CandidateIncluded` events to keep track of candidates that have been
-///   included on chains.
+/// - Monitors for `CandidateIncluded` events to keep track of candidates that have been included on
+///   chains.
 /// - Monitors for `CandidateBacked` events to keep track of all backed candidates.
 /// - Calls `FetchOnChainVotes` for each block to gather potentially missed votes from chain.
 ///
@@ -294,11 +295,11 @@ impl ChainScraper {
 
 	/// Prune finalized candidates.
 	///
-	/// We keep each candidate for `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks after finalization.
-	/// After that we treat it as low priority.
+	/// We keep each candidate for `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks after
+	/// finalization. After that we treat it as low priority.
 	pub fn process_finalized_block(&mut self, finalized_block_number: &BlockNumber) {
-		// `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1` because `finalized_block_number`counts to the
-		// candidate lifetime.
+		// `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1` because
+		// `finalized_block_number` counts to the candidate lifetime.
 		match finalized_block_number.checked_sub(DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1)
 		{
 			Some(key_to_prune) => {
diff --git a/node/core/dispute-coordinator/src/scraping/tests.rs b/node/core/dispute-coordinator/src/scraping/tests.rs
index 57e0731056b7..d938304a9e97 100644
--- a/node/core/dispute-coordinator/src/scraping/tests.rs
+++ b/node/core/dispute-coordinator/src/scraping/tests.rs
@@ -183,7 +183,8 @@ fn get_backed_candidate_event(block_number: BlockNumber) -> Vec<CandidateEvent>
 		GroupIndex::from(0),
 	)]
 }
-/// Hash for a 'magic' candidate. This is meant to be a special candidate used to verify special cases.
+/// Hash for a 'magic' candidate. This is meant to be a special candidate used to verify special
+/// cases.
 fn get_magic_candidate_hash() -> Hash {
 	BlakeTwo256::hash(&"abc".encode())
 }
@@ -425,7 +426,7 @@ fn scraper_requests_candidates_of_non_finalized_ancestors() {
 			&chain,
 			finalized_block_number,
 			BLOCKS_TO_SKIP -
-				(finalized_block_number - DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION) as usize, // Expect the provider not to go past finalized block.
+				(finalized_block_number - DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION) as usize, /* Expect the provider not to go past finalized block. */
 			get_backed_and_included_candidate_events,
 		);
 		join(process_active_leaves_update(ctx.sender(), &mut ordering, next_update), overseer_fut)
@@ -468,7 +469,8 @@ fn scraper_prunes_finalized_candidates() {
 
 		let candidate = make_candidate_receipt(get_block_number_hash(TEST_TARGET_BLOCK_NUMBER));
 
-		// After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the candidate should be removed
+		// After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the candidate should be
+		// removed
 		finalized_block_number =
 			TEST_TARGET_BLOCK_NUMBER + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION;
 		process_finalized_block(&mut scraper, &finalized_block_number);
@@ -518,8 +520,9 @@ fn scraper_handles_backed_but_not_included_candidate() {
 		finalized_block_number += 1;
 		process_finalized_block(&mut scraper, &finalized_block_number);
 
-		// `FIRST_TEST_BLOCK` is finalized, which is within `BACKED_CANDIDATE_LIFETIME_AFTER_FINALIZATION` window.
-		// The candidate should still be backed.
+		// `FIRST_TEST_BLOCK` is finalized, which is within
+		// `BACKED_CANDIDATE_LIFETIME_AFTER_FINALIZATION` window. The candidate should still be
+		// backed.
 		let candidate = make_candidate_receipt(get_block_number_hash(TEST_TARGET_BLOCK_NUMBER));
 		assert!(!scraper.is_candidate_included(&candidate.hash()));
 		assert!(scraper.is_candidate_backed(&candidate.hash()));
@@ -576,7 +579,8 @@ fn scraper_handles_the_same_candidate_incuded_in_two_different_block_heights() {
 			.await;
 
 		// Finalize blocks to enforce pruning of scraped events.
-		// The magic candidate was added twice, so it shouldn't be removed if we finalize two more blocks.
+		// The magic candidate was added twice, so it shouldn't be removed if we finalize two more
+		// blocks.
 		finalized_block_number = test_targets.first().expect("there are two block nums") +
 			DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION;
 		process_finalized_block(&mut scraper, &finalized_block_number);
@@ -641,7 +645,8 @@ fn inclusions_per_candidate_properly_adds_and_prunes() {
 			])
 		);
 
-		// After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the earlier inclusion should be removed
+		// After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the earlier inclusion should
+		// be removed
 		finalized_block_number =
 			TEST_TARGET_BLOCK_NUMBER + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION;
 		process_finalized_block(&mut scraper, &finalized_block_number);
diff --git a/node/core/dispute-coordinator/src/tests.rs b/node/core/dispute-coordinator/src/tests.rs
index f2590aea1511..d0cf494d2d4d 100644
--- a/node/core/dispute-coordinator/src/tests.rs
+++ b/node/core/dispute-coordinator/src/tests.rs
@@ -734,8 +734,9 @@ fn too_many_unconfirmed_statements_are_considered_spam() {
 				.await;
 
 			// Participation has to fail here, otherwise the dispute will be confirmed. However
-			// participation won't happen at all because the dispute is neither backed, not confirmed
-			// nor the candidate is included. Or in other words - we'll refrain from participation.
+			// participation won't happen at all because the dispute is neither backed, nor
+			// confirmed nor the candidate is included. Or in other words - we'll refrain from
+			// participation.
 
 			{
 				let (tx, rx) = oneshot::channel();
@@ -2050,7 +2051,8 @@ fn concluded_supermajority_against_non_active_after_time() {
 				ImportStatementsResult::ValidImport => {}
 			);
 
-			// Use a different expected commitments hash to ensure the candidate validation returns invalid.
+			// Use a different expected commitments hash to ensure the candidate validation returns
+			// invalid.
 			participation_with_distribution(
 				&mut virtual_overseer,
 				&candidate_hash,
@@ -2351,7 +2353,8 @@ fn resume_dispute_with_local_statement() {
 
 			assert_eq!(messages.len(), 1, "A message should have gone out.");
 
-			// Assert that subsystem is not sending Participation messages because we issued a local statement
+			// Assert that subsystem is not sending Participation messages because we issued a local
+			// statement
 			assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none());
 
 			virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
@@ -2445,7 +2448,8 @@ fn resume_dispute_without_local_statement_or_local_key() {
 			Box::pin(async move {
 				test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-				// Assert that subsystem is not sending Participation messages because we issued a local statement
+				// Assert that subsystem is not sending Participation messages because we issued a
+				// local statement
 				assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none());
 
 				virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
@@ -2751,7 +2755,8 @@ fn redundant_votes_ignored() {
 }
 
 #[test]
-/// Make sure no disputes are recorded when there are no opposing votes, even if we reached supermajority.
+/// Make sure no disputes are recorded when there are no opposing votes, even if we reached
+/// supermajority.
 fn no_onesided_disputes() {
 	test_harness(|mut test_state, mut virtual_overseer| {
 		Box::pin(async move {
@@ -3124,16 +3129,17 @@ fn participation_requests_reprioritized_for_newly_included() {
 				candidate_receipt.descriptor.pov_hash = Hash::from(
 					[repetition; 32], // Altering this receipt so its hash will be changed
 				);
-				// Set consecutive parents (starting from zero). They will order the candidates for participation.
+				// Set consecutive parents (starting from zero). They will order the candidates for
+				// participation.
 				let parent_block_num: BlockNumber = repetition as BlockNumber - 1;
 				candidate_receipt.descriptor.relay_parent =
 					test_state.block_num_to_header.get(&parent_block_num).unwrap().clone();
 				receipts.push(candidate_receipt.clone());
 			}
 
-			// Mark all candidates as backed, so their participation requests make it to best effort.
-			// These calls must all occur before including the candidates due to test overseer
-			// oddities.
+			// Mark all candidates as backed, so their participation requests make it to best
+			// effort. These calls must all occur before including the candidates due to test
+			// overseer oddities.
 			let mut candidate_events = Vec::new();
 			for r in receipts.iter() {
 				candidate_events.push(make_candidate_backed_event(r.clone()))
@@ -3172,7 +3178,8 @@ fn participation_requests_reprioritized_for_newly_included() {
 					.await;
 
 				// Handle corresponding messages to unblock import
-				// we need to handle `ApprovalVotingMessage::GetApprovalSignaturesForCandidate` for import
+				// we need to handle `ApprovalVotingMessage::GetApprovalSignaturesForCandidate` for
+				// import
 				handle_approval_vote_request(
 					&mut virtual_overseer,
 					&candidate_hash,
@@ -3180,8 +3187,9 @@ fn participation_requests_reprioritized_for_newly_included() {
 				)
 				.await;
 
-				//  We'll trigger participation for the first `MAX_PARALLEL_PARTICIPATIONS` candidates.
-				// The rest will be queued => we need to handle `ChainApiMessage::BlockNumber` for them.
+				//  We'll trigger participation for the first `MAX_PARALLEL_PARTICIPATIONS`
+				// candidates. The rest will be queued => we need to handle
+				// `ChainApiMessage::BlockNumber` for them.
 				if idx >= crate::participation::MAX_PARALLEL_PARTICIPATIONS {
 					// We send the `idx` as parent block number, because it is used for ordering.
 					// This way we get predictable ordering and participation.
@@ -3201,11 +3209,13 @@ fn participation_requests_reprioritized_for_newly_included() {
 				)
 				.await;
 
-			// NB: The checks below are a bit racy. In theory candidate 2 can be processed even before candidate 0 and this is okay. If any
-			// of the asserts in the two functions after this comment fail -> rework `participation_with_distribution` to expect a set of
+			// NB: The checks below are a bit racy. In theory candidate 2 can be processed even
+			// before candidate 0 and this is okay. If any of the asserts in the two functions after
+			// this comment fail -> rework `participation_with_distribution` to expect a set of
 			// commitment hashes instead of just one.
 
-			// This is the candidate for which participation was started initially (`MAX_PARALLEL_PARTICIPATIONS` threshold was not yet hit)
+			// This is the candidate for which participation was started initially
+			// (`MAX_PARALLEL_PARTICIPATIONS` threshold was not yet hit)
 			participation_with_distribution(
 				&mut virtual_overseer,
 				&receipts.get(0).expect("There is more than one candidate").hash(),
@@ -3326,7 +3336,8 @@ fn informs_chain_selection_when_dispute_concluded_against() {
 				ImportStatementsResult::ValidImport => {}
 			);
 
-			// Use a different expected commitments hash to ensure the candidate validation returns invalid.
+			// Use a different expected commitments hash to ensure the candidate validation returns
+			// invalid.
 			participation_with_distribution(
 				&mut virtual_overseer,
 				&candidate_hash,
@@ -3440,7 +3451,8 @@ fn session_info_is_requested_only_once() {
 
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			// This leaf activation shouldn't fetch `SessionInfo` because the session is already cached
+			// This leaf activation shouldn't fetch `SessionInfo` because the session is already
+			// cached
 			test_state
 				.activate_leaf_at_session(
 					&mut virtual_overseer,
@@ -3475,8 +3487,8 @@ fn session_info_is_requested_only_once() {
 	});
 }
 
-// Big jump means the new session we see with a leaf update is at least a `DISPUTE_WINDOW` bigger than
-// the already known one. In this case The whole `DISPUTE_WINDOW` should be fetched.
+// Big jump means the new session we see with a leaf update is at least a `DISPUTE_WINDOW` bigger
+// than the already known one. In this case the whole `DISPUTE_WINDOW` should be fetched.
 #[test]
 fn session_info_big_jump_works() {
 	test_harness(|mut test_state, mut virtual_overseer| {
@@ -3485,7 +3497,8 @@ fn session_info_big_jump_works() {
 
 			test_state.handle_resume_sync(&mut virtual_overseer, session_on_startup).await;
 
-			// This leaf activation shouldn't fetch `SessionInfo` because the session is already cached
+			// This leaf activation shouldn't fetch `SessionInfo` because the session is already
+			// cached
 			test_state
 				.activate_leaf_at_session(
 					&mut virtual_overseer,
@@ -3525,8 +3538,8 @@ fn session_info_big_jump_works() {
 	});
 }
 
-// Small jump means the new session we see with a leaf update is at less than last known one + `DISPUTE_WINDOW`. In this
-// case fetching should start from last known one + 1.
+// Small jump means the new session we see with a leaf update is less than last known one +
+// `DISPUTE_WINDOW`. In this case fetching should start from last known one + 1.
 #[test]
 fn session_info_small_jump_works() {
 	test_harness(|mut test_state, mut virtual_overseer| {
@@ -3535,7 +3548,8 @@ fn session_info_small_jump_works() {
 
 			test_state.handle_resume_sync(&mut virtual_overseer, session_on_startup).await;
 
-			// This leaf activation shouldn't fetch `SessionInfo` because the session is already cached
+			// This leaf activation shouldn't fetch `SessionInfo` because the session is already
+			// cached
 			test_state
 				.activate_leaf_at_session(
 					&mut virtual_overseer,
diff --git a/node/core/parachains-inherent/src/lib.rs b/node/core/parachains-inherent/src/lib.rs
index f27481ee5a7d..3063147fb136 100644
--- a/node/core/parachains-inherent/src/lib.rs
+++ b/node/core/parachains-inherent/src/lib.rs
@@ -16,11 +16,12 @@
 
 //! The parachain inherent data provider
 //!
-//! Parachain backing and approval is an off-chain process, but the parachain needs to progress on chain as well. To
-//! make it progress on chain a block producer needs to forward information about the state of a parachain to the
-//! runtime. This information is forwarded through an inherent to the runtime. Here we provide the
-//! [`ParachainInherentDataProvider`] that requests the relevant data from the provisioner subsystem and creates the
-//! the inherent data that the runtime will use to create an inherent.
+//! Parachain backing and approval is an off-chain process, but the parachain needs to progress on
+//! chain as well. To make it progress on chain a block producer needs to forward information about
+//! the state of a parachain to the runtime. This information is forwarded through an inherent to
+//! the runtime. Here we provide the [`ParachainInherentDataProvider`] that requests the relevant
+//! data from the provisioner subsystem and creates the inherent data that the runtime will use
+//! to create an inherent.
 
 #![deny(unused_crate_dependencies, unused_results)]
 
diff --git a/node/core/provisioner/src/disputes/mod.rs b/node/core/provisioner/src/disputes/mod.rs
index fab70a054698..2d8f6fb6e93b 100644
--- a/node/core/provisioner/src/disputes/mod.rs
+++ b/node/core/provisioner/src/disputes/mod.rs
@@ -14,7 +14,8 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-//! The disputes module is responsible for selecting dispute votes to be sent with the inherent data.
+//! The disputes module is responsible for selecting dispute votes to be sent with the inherent
+//! data.
 
 use crate::LOG_TARGET;
 use futures::channel::oneshot;
@@ -22,7 +23,8 @@ use polkadot_node_primitives::CandidateVotes;
 use polkadot_node_subsystem::{messages::DisputeCoordinatorMessage, overseer};
 use polkadot_primitives::{CandidateHash, SessionIndex};
 
-/// Request the relevant dispute statements for a set of disputes identified by `CandidateHash` and the `SessionIndex`.
+/// Request the relevant dispute statements for a set of disputes identified by `CandidateHash` and
+/// the `SessionIndex`.
 async fn request_votes(
 	sender: &mut impl overseer::ProvisionerSenderTrait,
 	disputes_to_query: Vec<(SessionIndex, CandidateHash)>,
diff --git a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs
index 5c8aaad422f2..096b73d271a8 100644
--- a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs
+++ b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs
@@ -48,7 +48,8 @@ pub const MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME: usize = 200;
 /// Controls how much dispute votes to be fetched from the `dispute-coordinator` per iteration in
 /// `fn vote_selection`. The purpose is to fetch the votes in batches until
 /// `MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME` is reached. If all votes are fetched in single call
-/// we might fetch votes which we never use. This will create unnecessary load on `dispute-coordinator`.
+/// we might fetch votes which we never use. This will create unnecessary load on
+/// `dispute-coordinator`.
 ///
 /// This value should be less than `MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME`. Increase it in case
 /// `provisioner` sends too many `QueryCandidateVotes` messages to `dispite-coordinator`.
@@ -68,22 +69,23 @@ const VOTES_SELECTION_BATCH_SIZE: usize = 11;
 ///   * Offchain vs Onchain
 ///   * Concluded onchain vs Unconcluded onchain
 ///
-/// Provisioner fetches all disputes from `dispute-coordinator` and separates them in multiple partitions.
-/// Please refer to `struct PartitionedDisputes` for details about the actual partitions.
-/// Each partition has got a priority implicitly assigned to it and the disputes are selected based on this
-/// priority (e.g. disputes in partition 1, then if there is space - disputes from partition 2 and so on).
+/// Provisioner fetches all disputes from `dispute-coordinator` and separates them in multiple
+/// partitions. Please refer to `struct PartitionedDisputes` for details about the actual
+/// partitions. Each partition has got a priority implicitly assigned to it and the disputes are
+/// selected based on this priority (e.g. disputes in partition 1, then if there is space - disputes
+/// from partition 2 and so on).
 ///
 /// # Votes selection
 ///
-/// Besides the prioritization described above the votes in each partition are filtered too. Provisioner
-/// fetches all onchain votes and filters them out from all partitions. As a result the Runtime receives
-/// only fresh votes (votes it didn't know about).
+/// Besides the prioritization described above the votes in each partition are filtered too.
+/// Provisioner fetches all onchain votes and filters them out from all partitions. As a result the
+/// Runtime receives only fresh votes (votes it didn't know about).
 ///
 /// # How the onchain votes are fetched
 ///
-/// The logic outlined above relies on `RuntimeApiRequest::Disputes` message from the Runtime. The user
-/// check the Runtime version before calling `select_disputes`. If the function is used with old runtime
-/// an error is logged and the logic will continue with empty onchain votes `HashMap`.
+/// The logic outlined above relies on `RuntimeApiRequest::Disputes` message from the Runtime. The
+/// user must check the Runtime version before calling `select_disputes`. If the function is used with
+/// old runtime an error is logged and the logic will continue with empty onchain votes `HashMap`.
 pub async fn select_disputes<Sender>(
 	sender: &mut Sender,
 	metrics: &metrics::Metrics,
@@ -110,7 +112,8 @@ where
 			r
 		},
 		Err(GetOnchainDisputesError::NotSupported(runtime_api_err, relay_parent)) => {
-			// Runtime version is checked before calling this method, so the error below should never happen!
+			// Runtime version is checked before calling this method, so the error below should
+			// never happen!
 			gum::error!(
 				target: LOG_TARGET,
 				?runtime_api_err,
@@ -152,7 +155,8 @@ where
 	gum::trace!(target: LOG_TARGET, ?leaf, "Filtering recent disputes");
 
 	// Filter out unconfirmed disputes. However if the dispute is already onchain - don't skip it.
-	// In this case we'd better push as much fresh votes as possible to bring it to conclusion faster.
+	// In this case we'd better push as many fresh votes as possible to bring it to conclusion
+	// faster.
 	let recent_disputes = recent_disputes
 		.into_iter()
 		.filter(|d| d.2.is_confirmed_concluded() || onchain.contains_key(&(d.0, d.1)))
@@ -178,9 +182,9 @@ where
 	make_multi_dispute_statement_set(metrics, result)
 }
 
-/// Selects dispute votes from `PartitionedDisputes` which should be sent to the runtime. Votes which
-/// are already onchain are filtered out. Result should be sorted by `(SessionIndex, CandidateHash)`
-/// which is enforced by the `BTreeMap`. This is a requirement from the runtime.
+/// Selects dispute votes from `PartitionedDisputes` which should be sent to the runtime. Votes
+/// which are already onchain are filtered out. Result should be sorted by `(SessionIndex,
+/// CandidateHash)` which is enforced by the `BTreeMap`. This is a requirement from the runtime.
 async fn vote_selection<Sender>(
 	sender: &mut Sender,
 	partitioned: PartitionedDisputes,
@@ -237,9 +241,9 @@ where
 		for (session_index, candidate_hash, selected_votes) in votes {
 			let votes_len = selected_votes.valid.raw().len() + selected_votes.invalid.len();
 			if votes_len + total_votes_len > MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME {
-				// we are done - no more votes can be added. Importantly, we don't add any votes for a dispute here
-				// if we can't fit them all. This gives us an important invariant, that backing votes for
-				// disputes make it into the provisioned vote set.
+				// we are done - no more votes can be added. Importantly, we don't add any votes for
+				// a dispute here if we can't fit them all. This gives us an important invariant,
+				// that backing votes for disputes make it into the provisioned vote set.
 				gum::trace!(
 					target: LOG_TARGET,
 					?request_votes_counter,
@@ -483,7 +487,8 @@ fn make_multi_dispute_statement_set(
 		.collect()
 }
 
-/// Gets the on-chain disputes at a given block number and returns them as a `HashMap` so that searching in them is cheap.
+/// Gets the on-chain disputes at a given block number and returns them as a `HashMap` so that
+/// searching in them is cheap.
 pub async fn get_onchain_disputes<Sender>(
 	sender: &mut Sender,
 	relay_parent: Hash,
diff --git a/node/core/provisioner/src/disputes/prioritized_selection/tests.rs b/node/core/provisioner/src/disputes/prioritized_selection/tests.rs
index 4ae67e3b7968..7798ebe51aaf 100644
--- a/node/core/provisioner/src/disputes/prioritized_selection/tests.rs
+++ b/node/core/provisioner/src/disputes/prioritized_selection/tests.rs
@@ -237,21 +237,22 @@ fn partitioning_happy_case() {
 	);
 }
 
-// This test verifies the double voting behavior. Currently we don't care if a supermajority is achieved with or
-// without the 'help' of a double vote (a validator voting for and against at the same time). This makes the test
-// a bit pointless but anyway I'm leaving it here to make this decision explicit and have the test code ready in
-// case this behavior needs to be further tested in the future.
-// Link to the PR with the discussions: https://github.com/paritytech/polkadot/pull/5567
+// This test verifies the double voting behavior. Currently we don't care if a supermajority is
+// achieved with or without the 'help' of a double vote (a validator voting for and against at the
+// same time). This makes the test a bit pointless but anyway I'm leaving it here to make this
+// decision explicit and have the test code ready in case this behavior needs to be further tested
+// in the future. Link to the PR with the discussions: https://github.com/paritytech/polkadot/pull/5567
 #[test]
 fn partitioning_doubled_onchain_vote() {
 	let mut input = Vec::<(SessionIndex, CandidateHash, DisputeStatus)>::new();
 	let mut onchain = HashMap::<(u32, CandidateHash), DisputeState>::new();
 
-	// Dispute A relies on a 'double onchain vote' to conclude. Validator with index 0 has voted both `for` and `against`.
-	// Despite that this dispute should be considered 'can conclude onchain'.
+	// Dispute A relies on a 'double onchain vote' to conclude. Validator with index 0 has voted
+	// both `for` and `against`. Despite that this dispute should be considered 'can conclude
+	// onchain'.
 	let dispute_a = (3, CandidateHash(Hash::random()), DisputeStatus::Active);
-	// Dispute B has supermajority + 1 votes, so the doubled onchain vote doesn't affect it. It should be considered
-	// as 'can conclude onchain'.
+	// Dispute B has supermajority + 1 votes, so the doubled onchain vote doesn't affect it. It
+	// should be considered as 'can conclude onchain'.
 	let dispute_b = (4, CandidateHash(Hash::random()), DisputeStatus::Active);
 	input.push(dispute_a.clone());
 	input.push(dispute_b.clone());
diff --git a/node/core/provisioner/src/error.rs b/node/core/provisioner/src/error.rs
index 0f1747995843..5645ed2762bc 100644
--- a/node/core/provisioner/src/error.rs
+++ b/node/core/provisioner/src/error.rs
@@ -81,7 +81,8 @@ pub enum Error {
 	OverseerExited(SubsystemError),
 }
 
-/// Used by `get_onchain_disputes` to represent errors related to fetching on-chain disputes from the Runtime
+/// Used by `get_onchain_disputes` to represent errors related to fetching on-chain disputes from
+/// the Runtime
 #[allow(dead_code)] // Remove when promoting to stable
 #[fatality::fatality]
 pub enum GetOnchainDisputesError {
diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs
index 3ae297fee736..b5073763dfab 100644
--- a/node/core/provisioner/src/lib.rs
+++ b/node/core/provisioner/src/lib.rs
@@ -466,11 +466,11 @@ async fn send_inherent_data(
 /// - not more than one per validator
 /// - each 1 bit must correspond to an occupied core
 ///
-/// If we have too many, an arbitrary selection policy is fine. For purposes of maximizing availability,
-/// we pick the one with the greatest number of 1 bits.
+/// If we have too many, an arbitrary selection policy is fine. For purposes of maximizing
+/// availability, we pick the one with the greatest number of 1 bits.
 ///
-/// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated
-/// to the sorting of the input.
+/// Note: This does not enforce any sorting precondition on the output; the ordering there will be
+/// unrelated to the sorting of the input.
 fn select_availability_bitfields(
 	cores: &[CoreState],
 	bitfields: &[SignedAvailabilityBitfield],
@@ -532,7 +532,8 @@ fn select_availability_bitfields(
 	selected.into_values().collect()
 }
 
-/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core.
+/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to
+/// each free core.
 async fn select_candidates(
 	availability_cores: &[CoreState],
 	bitfields: &[SignedAvailabilityBitfield],
@@ -593,7 +594,8 @@ async fn select_candidates(
 
 		let computed_validation_data_hash = validation_data.hash();
 
-		// we arbitrarily pick the first of the backed candidates which match the appropriate selection criteria
+		// we arbitrarily pick the first of the backed candidates which match the appropriate
+		// selection criteria
 		if let Some(candidate) = candidates.iter().find(|backed_candidate| {
 			let descriptor = &backed_candidate.descriptor;
 			descriptor.para_id == scheduled_core.para_id &&
@@ -628,12 +630,12 @@ async fn select_candidates(
 	gum::trace!(target: LOG_TARGET, leaf_hash=?relay_parent,
 				"Got {} backed candidates", candidates.len());
 
-	// `selected_candidates` is generated in ascending order by core index, and `GetBackedCandidates`
-	// _should_ preserve that property, but let's just make sure.
+	// `selected_candidates` is generated in ascending order by core index, and
+	// `GetBackedCandidates` _should_ preserve that property, but let's just make sure.
 	//
-	// We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected candidate
-	// maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by checking them
-	// in order, we can ensure that the backed candidates are also in order.
+	// We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected
+	// candidate maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by
+	// checking them in order, we can ensure that the backed candidates are also in order.
 	let mut backed_idx = 0;
 	for selected in selected_candidates {
 		if selected ==
@@ -705,8 +707,9 @@ fn bitfields_indicate_availability(
 		let validator_idx = bitfield.validator_index().0 as usize;
 		match availability.get_mut(validator_idx) {
 			None => {
-				// in principle, this function might return a `Result<bool, Error>` so that we can more clearly express this error condition
-				// however, in practice, that would just push off an error-handling routine which would look a whole lot like this one.
+				// in principle, this function might return a `Result<bool, Error>` so that we can
+				// more clearly express this error condition however, in practice, that would just
+				// push off an error-handling routine which would look a whole lot like this one.
 				// simpler to just handle the error internally here.
 				gum::warn!(
 					target: LOG_TARGET,
@@ -726,8 +729,8 @@ fn bitfields_indicate_availability(
 	3 * availability.count_ones() >= 2 * availability.len()
 }
 
-// If we have to be absolutely precise here, this method gets the version of the `ParachainHost` api.
-// For brevity we'll just call it 'runtime version'.
+// If we have to be absolutely precise here, this method gets the version of the `ParachainHost`
+// api. For brevity we'll just call it 'runtime version'.
 async fn has_required_runtime(
 	sender: &mut impl overseer::ProvisionerSenderTrait,
 	relay_parent: Hash,
diff --git a/node/core/provisioner/src/metrics.rs b/node/core/provisioner/src/metrics.rs
index c65d999d04a7..fabbd798cf02 100644
--- a/node/core/provisioner/src/metrics.rs
+++ b/node/core/provisioner/src/metrics.rs
@@ -28,9 +28,10 @@ struct MetricsInner {
 	/// Bitfields array length in `ProvisionerInherentData` (the result for `RequestInherentData`)
 	inherent_data_response_bitfields: prometheus::Histogram,
 
-	/// The following metrics track how many disputes/votes the runtime will have to process. These will count
-	/// all recent statements meaning every dispute from last sessions: 10 min on Rococo, 60 min on Kusama and
-	/// 4 hours on Polkadot. The metrics are updated only when the node authors a block, so values vary across nodes.
+	/// The following metrics track how many disputes/votes the runtime will have to process. These
+	/// will count all recent statements meaning every dispute from last sessions: 10 min on
+	/// Rococo, 60 min on Kusama and 4 hours on Polkadot. The metrics are updated only when the
+	/// node authors a block, so values vary across nodes.
 	inherent_data_dispute_statement_sets: prometheus::Counter<prometheus::U64>,
 	inherent_data_dispute_statements: prometheus::CounterVec<prometheus::U64>,
 
diff --git a/node/core/provisioner/src/tests.rs b/node/core/provisioner/src/tests.rs
index e8692df8543a..4a469a43c893 100644
--- a/node/core/provisioner/src/tests.rs
+++ b/node/core/provisioner/src/tests.rs
@@ -90,7 +90,8 @@ mod select_availability_bitfields {
 		let cores = vec![occupied_core(0), occupied_core(1)];
 
 		// we pass in three bitfields with two validators
-		// this helps us check the postcondition that we get two bitfields back, for which the validators differ
+		// this helps us check the postcondition that we get two bitfields back, for which the
+		// validators differ
 		let bitfields = vec![
 			signed_bitfield(&keystore, bitvec.clone(), ValidatorIndex(0)),
 			signed_bitfield(&keystore, bitvec.clone(), ValidatorIndex(1)),
diff --git a/node/core/pvf-checker/src/lib.rs b/node/core/pvf-checker/src/lib.rs
index 222e85e36542..2946f3f78861 100644
--- a/node/core/pvf-checker/src/lib.rs
+++ b/node/core/pvf-checker/src/lib.rs
@@ -110,8 +110,8 @@ struct State {
 	///
 	/// Here are some fun facts about these futures:
 	///
-	/// - Pre-checking can take quite some time, in the matter of tens of seconds, so the futures here
-	///   can soak for quite some time.
+	/// - Pre-checking can take quite some time, in the matter of tens of seconds, so the futures
+	///   here can soak for quite some time.
 	/// - Pre-checking of one PVF can take drastically more time than pre-checking of another PVF.
 	///   This leads to results coming out of order.
 	///
diff --git a/node/core/pvf-checker/src/tests.rs b/node/core/pvf-checker/src/tests.rs
index 46e760936144..b223b1b54c0b 100644
--- a/node/core/pvf-checker/src/tests.rs
+++ b/node/core/pvf-checker/src/tests.rs
@@ -110,8 +110,8 @@ impl TestState {
 		Self { leaves, sessions, last_session_index }
 	}
 
-	/// A convenience function to receive a message from the overseer and returning `None` if nothing
-	/// was received within a reasonable (for local tests anyway) timeout.
+	/// A convenience function to receive a message from the overseer and return `None` if
+	/// nothing was received within a reasonable (for local tests anyway) timeout.
 	async fn recv_timeout(&mut self, handle: &mut VirtualOverseer) -> Option<AllMessages> {
 		futures::select! {
 			msg = handle.recv().fuse() => {
diff --git a/node/core/pvf/common/src/error.rs b/node/core/pvf/common/src/error.rs
index 64d17800ac10..6eb0d9b7df42 100644
--- a/node/core/pvf/common/src/error.rs
+++ b/node/core/pvf/common/src/error.rs
@@ -18,8 +18,8 @@ use crate::prepare::PrepareStats;
 use parity_scale_codec::{Decode, Encode};
 use std::fmt;
 
-/// Result of PVF preparation performed by the validation host. Contains stats about the preparation if
-/// successful
+/// Result of PVF preparation performed by the validation host. Contains stats about the preparation
+/// if successful
 pub type PrepareResult = Result<PrepareStats, PrepareError>;
 
 /// An error that occurred during the prepare part of the PVF pipeline.
@@ -35,13 +35,15 @@ pub enum PrepareError {
 	Panic(String),
 	/// Failed to prepare the PVF due to the time limit.
 	TimedOut,
-	/// An IO error occurred. This state is reported by either the validation host or by the worker.
+	/// An IO error occurred. This state is reported by either the validation host or by the
+	/// worker.
 	IoErr(String),
-	/// The temporary file for the artifact could not be created at the given cache path. This state is reported by the
-	/// validation host (not by the worker).
+	/// The temporary file for the artifact could not be created at the given cache path. This
+	/// state is reported by the validation host (not by the worker).
 	CreateTmpFileErr(String),
-	/// The response from the worker is received, but the file cannot be renamed (moved) to the final destination
-	/// location. This state is reported by the validation host (not by the worker).
+	/// The response from the worker is received, but the file cannot be renamed (moved) to the
+	/// final destination location. This state is reported by the validation host (not by the
+	/// worker).
 	RenameTmpFileErr(String),
 }
 
@@ -81,15 +83,16 @@ impl fmt::Display for PrepareError {
 
 /// Some internal error occurred.
 ///
-/// Should only ever be used for validation errors independent of the candidate and PVF, or for errors we ruled out
-/// during pre-checking (so preparation errors are fine).
+/// Should only ever be used for validation errors independent of the candidate and PVF, or for
+/// errors we ruled out during pre-checking (so preparation errors are fine).
 #[derive(Debug, Clone, Encode, Decode)]
 pub enum InternalValidationError {
 	/// Some communication error occurred with the host.
 	HostCommunication(String),
 	/// Could not find or open compiled artifact file.
 	CouldNotOpenFile(String),
-	/// An error occurred in the CPU time monitor thread. Should be totally unrelated to validation.
+	/// An error occurred in the CPU time monitor thread. Should be totally unrelated to
+	/// validation.
 	CpuTimeMonitorThread(String),
 	/// Some non-deterministic preparation error occurred.
 	NonDeterministicPrepareError(PrepareError),
diff --git a/node/core/pvf/common/src/executor_intf.rs b/node/core/pvf/common/src/executor_intf.rs
index ef74e5f2ca92..42ed4b79c761 100644
--- a/node/core/pvf/common/src/executor_intf.rs
+++ b/node/core/pvf/common/src/executor_intf.rs
@@ -35,10 +35,10 @@ use std::any::{Any, TypeId};
 // left for the stack; this is, of course, overridable at link time when compiling the runtime)
 // plus the number of pages specified in the `extra_heap_pages` passed to the executor.
 //
-// By default, rustc (or `lld` specifically) should allocate 1 MiB for the shadow stack, or 16 pages.
-// The data section for runtimes are typically rather small and can fit in a single digit number of
-// WASM pages, so let's say an extra 16 pages. Thus let's assume that 32 pages or 2 MiB are used for
-// these needs by default.
+// By default, rustc (or `lld` specifically) should allocate 1 MiB for the shadow stack, or 16
+// pages. The data section for runtimes are typically rather small and can fit in a single digit
+// number of WASM pages, so let's say an extra 16 pages. Thus let's assume that 32 pages or 2 MiB
+// are used for these needs by default.
 const DEFAULT_HEAP_PAGES_ESTIMATE: u32 = 32;
 const EXTRA_HEAP_PAGES: u32 = 2048;
 
@@ -65,9 +65,9 @@ pub const DEFAULT_CONFIG: Config = Config {
 		//
 		// Here is how the values below were chosen.
 		//
-		// At the moment of writing, the default native stack size limit is 1 MiB. Assuming a logical item
-		// (see the docs about the field and the instrumentation algorithm) is 8 bytes, 1 MiB can
-		// fit 2x 65536 logical items.
+		// At the moment of writing, the default native stack size limit is 1 MiB. Assuming a
+		// logical item (see the docs about the field and the instrumentation algorithm) is 8 bytes,
+		// 1 MiB can fit 2x 65536 logical items.
 		//
 		// Since reaching the native stack limit is undesirable, we halve the logical item limit and
 		// also increase the native 256x. This hopefully should preclude wasm code from reaching
@@ -113,7 +113,7 @@ pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Result<Semantics, S
 			ExecutorParam::WasmExtBulkMemory => sem.wasm_bulk_memory = true,
 			// TODO: Not implemented yet; <https://github.com/paritytech/polkadot/issues/6472>.
 			ExecutorParam::PrecheckingMaxMemory(_) => (),
-			ExecutorParam::PvfPrepTimeout(_, _) | ExecutorParam::PvfExecTimeout(_, _) => (), // Not used here
+			ExecutorParam::PvfPrepTimeout(_, _) | ExecutorParam::PvfExecTimeout(_, _) => (), /* Not used here */
 		}
 	}
 	sem.deterministic_stack_limit = Some(stack_limit);
@@ -135,8 +135,8 @@ impl Executor {
 		Ok(Self { config })
 	}
 
-	/// Executes the given PVF in the form of a compiled artifact and returns the result of execution
-	/// upon success.
+	/// Executes the given PVF in the form of a compiled artifact and returns the result of
+	/// execution upon success.
 	///
 	/// # Safety
 	///
diff --git a/node/core/pvf/common/src/worker/mod.rs b/node/core/pvf/common/src/worker/mod.rs
index 8dd99fc762d8..d9a0dff71b24 100644
--- a/node/core/pvf/common/src/worker/mod.rs
+++ b/node/core/pvf/common/src/worker/mod.rs
@@ -251,9 +251,9 @@ pub mod thread {
 		Arc::new((Mutex::new(WaitOutcome::Pending), Condvar::new()))
 	}
 
-	/// Runs a worker thread. Will first enable security features, and afterwards notify the threads waiting on the
-	/// condvar. Catches panics during execution and resumes the panics after triggering the condvar, so that the
-	/// waiting thread is notified on panics.
+	/// Runs a worker thread. Will first enable security features, and afterwards notify the threads
+	/// waiting on the condvar. Catches panics during execution and resumes the panics after
+	/// triggering the condvar, so that the waiting thread is notified on panics.
 	///
 	/// # Returns
 	///
diff --git a/node/core/pvf/execute-worker/src/lib.rs b/node/core/pvf/execute-worker/src/lib.rs
index c6ee515f9093..6f632a0ae95e 100644
--- a/node/core/pvf/execute-worker/src/lib.rs
+++ b/node/core/pvf/execute-worker/src/lib.rs
@@ -239,7 +239,8 @@ pub fn worker_entrypoint(
 					WaitOutcome::TimedOut => {
 						match cpu_time_monitor_thread.join() {
 							Ok(Some(cpu_time_elapsed)) => {
-								// Log if we exceed the timeout and the other thread hasn't finished.
+								// Log if we exceed the timeout and the other thread hasn't
+								// finished.
 								gum::warn!(
 									target: LOG_TARGET,
 									%worker_pid,
diff --git a/node/core/pvf/prepare-worker/src/lib.rs b/node/core/pvf/prepare-worker/src/lib.rs
index 3f60163c6196..caa7d33df12a 100644
--- a/node/core/pvf/prepare-worker/src/lib.rs
+++ b/node/core/pvf/prepare-worker/src/lib.rs
@@ -190,8 +190,9 @@ pub fn worker_entrypoint(
 
 						// If we are pre-checking, check for runtime construction errors.
 						//
-						// As pre-checking is more strict than just preparation in terms of memory and
-						// time, it is okay to do extra checks here. This takes negligible time anyway.
+						// As pre-checking is more strict than just preparation in terms of memory
+						// and time, it is okay to do extra checks here. This takes negligible time
+						// anyway.
 						if let PrepareJobKind::Prechecking = prepare_job_kind {
 							result = result.and_then(|output| {
 								runtime_construction_check(output.0.as_ref(), executor_params)?;
@@ -253,10 +254,11 @@ pub fn worker_entrypoint(
 
 								// Write the serialized artifact into a temp file.
 								//
-								// PVF host only keeps artifacts statuses in its memory, successfully
-								// compiled code gets stored on the disk (and consequently deserialized
-								// by execute-workers). The prepare worker is only required to send `Ok`
-								// to the pool to indicate the success.
+								// PVF host only keeps artifacts statuses in its memory,
+								// successfully compiled code gets stored on the disk (and
+								// consequently deserialized by execute-workers). The prepare worker
+								// is only required to send `Ok` to the pool to indicate the
+								// success.
 
 								gum::debug!(
 									target: LOG_TARGET,
@@ -275,7 +277,8 @@ pub fn worker_entrypoint(
 					WaitOutcome::TimedOut => {
 						match cpu_time_monitor_thread.join() {
 							Ok(Some(cpu_time_elapsed)) => {
-								// Log if we exceed the timeout and the other thread hasn't finished.
+								// Log if we exceed the timeout and the other thread hasn't
+								// finished.
 								gum::warn!(
 									target: LOG_TARGET,
 									%worker_pid,
diff --git a/node/core/pvf/prepare-worker/src/memory_stats.rs b/node/core/pvf/prepare-worker/src/memory_stats.rs
index e6dc8572c4a3..7904dfa9cb88 100644
--- a/node/core/pvf/prepare-worker/src/memory_stats.rs
+++ b/node/core/pvf/prepare-worker/src/memory_stats.rs
@@ -83,8 +83,8 @@ pub mod memory_tracker {
 	///
 	/// # Errors
 	///
-	/// For simplicity, any errors are returned as a string. As this is not a critical component, errors
-	/// are used for informational purposes (logging) only.
+	/// For simplicity, any errors are returned as a string. As this is not a critical component,
+	/// errors are used for informational purposes (logging) only.
 	pub fn memory_tracker_loop(condvar: thread::Cond) -> Result<MemoryAllocationStats, String> {
 		// NOTE: This doesn't need to be too fine-grained since preparation currently takes 3-10s or
 		// more. Apart from that, there is not really a science to this number.
diff --git a/node/core/pvf/src/artifacts.rs b/node/core/pvf/src/artifacts.rs
index 78d2f88941b8..a180af15db27 100644
--- a/node/core/pvf/src/artifacts.rs
+++ b/node/core/pvf/src/artifacts.rs
@@ -224,7 +224,8 @@ impl Artifacts {
 			.is_none());
 	}
 
-	/// Remove and retrieve the artifacts from the table that are older than the supplied Time-To-Live.
+	/// Remove and retrieve the artifacts from the table that are older than the supplied
+	/// Time-To-Live.
 	pub fn prune(&mut self, artifact_ttl: Duration) -> Vec<ArtifactId> {
 		let now = SystemTime::now();
 
diff --git a/node/core/pvf/src/error.rs b/node/core/pvf/src/error.rs
index 7372cd233c49..cb35ec9e9d9a 100644
--- a/node/core/pvf/src/error.rs
+++ b/node/core/pvf/src/error.rs
@@ -38,29 +38,30 @@ pub enum InvalidCandidate {
 	/// The worker has died during validation of a candidate. That may fall in one of the following
 	/// categories, which we cannot distinguish programmatically:
 	///
-	/// (a) Some sort of transient glitch caused the worker process to abort. An example would be that
-	///     the host machine ran out of free memory and the OOM killer started killing the processes,
-	///     and in order to save the parent it will "sacrifice child" first.
+	/// (a) Some sort of transient glitch caused the worker process to abort. An example would be
+	/// that the host machine ran out of free memory and the OOM killer started killing the
+	/// processes, and in order to save the parent it will "sacrifice child" first.
 	///
 	/// (b) The candidate triggered a code path that has lead to the process death. For example,
-	///     the PVF found a way to consume unbounded amount of resources and then it either exceeded
-	///     an `rlimit` (if set) or, again, invited OOM killer. Another possibility is a bug in
-	///     wasmtime allowed the PVF to gain control over the execution worker.
+	///     the PVF found a way to consume unbounded amount of resources and then it either
+	///     exceeded an `rlimit` (if set) or, again, invited OOM killer. Another possibility is a
+	///     bug in wasmtime allowed the PVF to gain control over the execution worker.
 	///
 	/// We attribute such an event to an *invalid candidate* in either case.
 	///
 	/// The rationale for this is that a glitch may lead to unfair rejecting candidate by a single
-	/// validator. If the glitch is somewhat more persistent the validator will reject all candidate
-	/// thrown at it and hopefully the operator notices it by decreased reward performance of the
-	/// validator. On the other hand, if the worker died because of (b) we would have better chances
-	/// to stop the attack.
+	/// validator. If the glitch is somewhat more persistent the validator will reject all
+	/// candidates thrown at it and hopefully the operator notices it by decreased reward
+	/// performance of the validator. On the other hand, if the worker died because of (b) we would
+	/// have better chances to stop the attack.
 	AmbiguousWorkerDeath,
 	/// PVF execution (compilation is not included) took more time than was allotted.
 	HardTimeout,
-	/// A panic occurred and we can't be sure whether the candidate is really invalid or some internal glitch occurred.
-	/// Whenever we are unsure, we can never treat an error as internal as we would abstain from voting. This is bad
-	/// because if the issue was due to the candidate, then all validators would abstain, stalling finality on the
-	/// chain. So we will first retry the candidate, and if the issue persists we are forced to vote invalid.
+	/// A panic occurred and we can't be sure whether the candidate is really invalid or some
+	/// internal glitch occurred. Whenever we are unsure, we can never treat an error as internal
+	/// as we would abstain from voting. This is bad because if the issue was due to the candidate,
+	/// then all validators would abstain, stalling finality on the chain. So we will first retry
+	/// the candidate, and if the issue persists we are forced to vote invalid.
 	Panic(String),
 }
 
diff --git a/node/core/pvf/src/execute/queue.rs b/node/core/pvf/src/execute/queue.rs
index 33a1c6f89709..acb260e25693 100644
--- a/node/core/pvf/src/execute/queue.rs
+++ b/node/core/pvf/src/execute/queue.rs
@@ -419,7 +419,8 @@ fn spawn_extra_worker(queue: &mut Queue, job: ExecuteJob) {
 /// beforehand. In such a way, a race condition is avoided: during the worker being spawned,
 /// another job in the queue, with an incompatible execution environment, may become stale, and
 /// the queue would have to kill a newly started worker and spawn another one.
-/// Nevertheless, if the worker finishes executing the job, it becomes idle and may be used to execute other jobs with a compatible execution environment.
+/// Nevertheless, if the worker finishes executing the job, it becomes idle and may be used to
+/// execute other jobs with a compatible execution environment.
 async fn spawn_worker_task(
 	program_path: PathBuf,
 	job: ExecuteJob,
diff --git a/node/core/pvf/src/execute/worker_intf.rs b/node/core/pvf/src/execute/worker_intf.rs
index 9d8b61d10447..948abd2261d7 100644
--- a/node/core/pvf/src/execute/worker_intf.rs
+++ b/node/core/pvf/src/execute/worker_intf.rs
@@ -74,8 +74,9 @@ pub enum Outcome {
 	/// PVF execution completed successfully and the result is returned. The worker is ready for
 	/// another job.
 	Ok { result_descriptor: ValidationResult, duration: Duration, idle_worker: IdleWorker },
-	/// The candidate validation failed. It may be for example because the wasm execution triggered a trap.
-	/// Errors related to the preparation process are not expected to be encountered by the execution workers.
+	/// The candidate validation failed. It may be for example because the wasm execution triggered
+	/// a trap. Errors related to the preparation process are not expected to be encountered by the
+	/// execution workers.
 	InvalidCandidate { err: String, idle_worker: IdleWorker },
 	/// An internal error happened during the validation. Such an error is most likely related to
 	/// some transient glitch.
@@ -95,7 +96,8 @@ pub enum Outcome {
 /// Given the idle token of a worker and parameters of work, communicates with the worker and
 /// returns the outcome.
 ///
-/// NOTE: Not returning the idle worker token in `Outcome` will trigger the child process being killed.
+/// NOTE: Not returning the idle worker token in `Outcome` will trigger the child process being
+/// killed.
 pub async fn start_work(
 	worker: IdleWorker,
 	artifact: ArtifactPathId,
diff --git a/node/core/pvf/src/host.rs b/node/core/pvf/src/host.rs
index a5772e34e16e..9f3b7e23fd89 100644
--- a/node/core/pvf/src/host.rs
+++ b/node/core/pvf/src/host.rs
@@ -455,8 +455,8 @@ async fn handle_precheck_pvf(
 			ArtifactState::Preparing { waiting_for_response, num_failures: _ } =>
 				waiting_for_response.push(result_sender),
 			ArtifactState::FailedToProcess { error, .. } => {
-				// Do not retry failed preparation if another pre-check request comes in. We do not retry pre-checking,
-				// anyway.
+				// Do not retry failed preparation if another pre-check request comes in. We do not
+				// retry pre-checking, anyway.
 				let _ = result_sender.send(PrepareResult::Err(error.clone()));
 			},
 		}
@@ -470,8 +470,8 @@ async fn handle_precheck_pvf(
 
 /// Handles PVF execution.
 ///
-/// This will try to prepare the PVF, if a prepared artifact does not already exist. If there is already a
-/// preparation job, we coalesce the two preparation jobs.
+/// This will try to prepare the PVF, if a prepared artifact does not already exist. If there is
+/// already a preparation job, we coalesce the two preparation jobs.
 ///
 /// If the prepare job succeeded previously, we will enqueue an execute job right away.
 ///
@@ -521,7 +521,8 @@ async fn handle_execute_pvf(
 						"handle_execute_pvf: Re-queuing PVF preparation for prepared artifact with missing file."
 					);
 
-					// The artifact has been prepared previously but the file is missing, prepare it again.
+					// The artifact has been prepared previously but the file is missing, prepare it
+					// again.
 					*state = ArtifactState::Preparing {
 						waiting_for_response: Vec::new(),
 						num_failures: 0,
@@ -721,8 +722,8 @@ async fn handle_prepare_done(
 		pending_requests
 	{
 		if result_tx.is_canceled() {
-			// Preparation could've taken quite a bit of time and the requester may be not interested
-			// in execution anymore, in which case we just skip the request.
+			// Preparation could've taken quite a bit of time and the requester may not be
+			// interested in execution anymore, in which case we just skip the request.
 			continue
 		}
 
@@ -855,8 +856,8 @@ fn can_retry_prepare_after_failure(
 		return false
 	}
 
-	// Retry if the retry cooldown has elapsed and if we have already retried less than `NUM_PREPARE_RETRIES` times. IO
-	// errors may resolve themselves.
+	// Retry if the retry cooldown has elapsed and if we have already retried less than
+	// `NUM_PREPARE_RETRIES` times. IO errors may resolve themselves.
 	SystemTime::now() >= last_time_failed + PREPARE_FAILURE_COOLDOWN &&
 		num_failures <= NUM_PREPARE_RETRIES
 }
diff --git a/node/core/pvf/src/lib.rs b/node/core/pvf/src/lib.rs
index eb6ab39ac500..1da0593835fb 100644
--- a/node/core/pvf/src/lib.rs
+++ b/node/core/pvf/src/lib.rs
@@ -32,26 +32,26 @@
 //! (a) PVF pre-checking. This takes the `Pvf` code and tries to prepare it (verify and
 //! compile) in order to pre-check its validity.
 //!
-//! (b) PVF execution. This accepts the PVF [`params`][`polkadot_parachain::primitives::ValidationParams`]
-//!     and the `Pvf` code, prepares (verifies and compiles) the code, and then executes PVF
-//!     with the `params`.
+//! (b) PVF execution. This accepts the PVF
+//! [`params`][`polkadot_parachain::primitives::ValidationParams`] and the `Pvf` code, prepares
+//! (verifies and compiles) the code, and then executes PVF with the `params`.
 //!
 //! (c) Heads up. This request allows to signal that the given PVF may be needed soon and that it
 //!     should be prepared for execution.
 //!
-//! The preparation results are cached for some time after they either used or was signaled in heads up.
-//! All requests that depends on preparation of the same PVF are bundled together and will be executed
-//! as soon as the artifact is prepared.
+//! The preparation results are cached for some time after they are either used or signaled in
+//! heads up. All requests that depend on preparation of the same PVF are bundled together and will
+//! be executed as soon as the artifact is prepared.
 //!
 //! # Priority
 //!
-//! PVF execution requests can specify the [priority][`Priority`] with which the given request should
-//! be handled. Different priority levels have different effects. This is discussed below.
+//! PVF execution requests can specify the [priority][`Priority`] with which the given request
+//! should be handled. Different priority levels have different effects. This is discussed below.
 //!
 //! Preparation started by a heads up signal always starts with the background priority. If there
-//! is already a request for that PVF preparation under way the priority is inherited. If after heads
-//! up, a new PVF execution request comes in with a higher priority, then the original task's priority
-//! will be adjusted to match the new one if it's larger.
+//! is already a request for that PVF preparation under way the priority is inherited. If after
+//! heads up, a new PVF execution request comes in with a higher priority, then the original task's
+//! priority will be adjusted to match the new one if it's larger.
 //!
 //! Priority can never go down, only up.
 //!
@@ -63,11 +63,11 @@
 //! dissimilar to actors. Each of such "processes" is a future task that contains an event loop that
 //! processes incoming messages, potentially delegating sub-tasks to other "processes".
 //!
-//! Two of these processes are queues. The first one is for preparation jobs and the second one is for
-//! execution. Both of the queues are backed by separate pools of workers of different kind.
+//! Two of these processes are queues. The first one is for preparation jobs and the second one is
+//! for execution. Both of the queues are backed by separate pools of workers of different kind.
 //!
-//! Preparation workers handle preparation requests by prevalidating and instrumenting PVF wasm code,
-//! and then passing it into the compiler, to prepare the artifact.
+//! Preparation workers handle preparation requests by prevalidating and instrumenting PVF wasm
+//! code, and then passing it into the compiler, to prepare the artifact.
 //!
 //! ## Artifacts
 //!
diff --git a/node/core/pvf/src/metrics.rs b/node/core/pvf/src/metrics.rs
index 62f8c6dc5157..3d792793498b 100644
--- a/node/core/pvf/src/metrics.rs
+++ b/node/core/pvf/src/metrics.rs
@@ -85,7 +85,8 @@ impl Metrics {
 
 			#[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))]
 			if let Some(tracker_stats) = memory_stats.memory_tracker_stats {
-				// We convert these stats from B to KB to match the unit of `ru_maxrss` from `getrusage`.
+				// We convert these stats from B to KB to match the unit of `ru_maxrss` from
+				// `getrusage`.
 				let max_resident_kb = (tracker_stats.resident / 1024) as f64;
 				let max_allocated_kb = (tracker_stats.allocated / 1024) as f64;
 
diff --git a/node/core/pvf/src/prepare/pool.rs b/node/core/pvf/src/prepare/pool.rs
index 1e8ccc7365bf..92aa4896c263 100644
--- a/node/core/pvf/src/prepare/pool.rs
+++ b/node/core/pvf/src/prepare/pool.rs
@@ -61,9 +61,9 @@ pub enum ToPool {
 
 	/// Request the given worker to start working on the given code.
 	///
-	/// Once the job either succeeded or failed, a [`FromPool::Concluded`] message will be sent back.
-	/// It's also possible that the worker dies before handling the message in which case [`FromPool::Rip`]
-	/// will be sent back.
+	/// Once the job either succeeded or failed, a [`FromPool::Concluded`] message will be sent
+	/// back. It's also possible that the worker dies before handling the message in which case
+	/// [`FromPool::Rip`] will be sent back.
 	///
 	/// In either case, the worker is considered busy and no further `StartWork` messages should be
 	/// sent until either `Concluded` or `Rip` message is received.
@@ -237,8 +237,8 @@ fn handle_to_pool(
 					);
 				} else {
 					// idle token is present after spawn and after a job is concluded;
-					// the precondition for `StartWork` is it should be sent only if all previous work
-					// items concluded;
+					// the precondition for `StartWork` is it should be sent only if all previous
+					// work items concluded;
 					// thus idle token is Some;
 					// qed.
 					never!("unexpected absence of the idle token in prepare pool");
@@ -311,7 +311,8 @@ fn handle_mux(
 			match outcome {
 				Outcome::Concluded { worker: idle, result } =>
 					handle_concluded_no_rip(from_pool, spawned, worker, idle, result),
-				// Return `Concluded`, but do not kill the worker since the error was on the host side.
+				// Return `Concluded`, but do not kill the worker since the error was on the host
+				// side.
 				Outcome::CreateTmpFileErr { worker: idle, err } => handle_concluded_no_rip(
 					from_pool,
 					spawned,
@@ -319,7 +320,8 @@ fn handle_mux(
 					idle,
 					Err(PrepareError::CreateTmpFileErr(err)),
 				),
-				// Return `Concluded`, but do not kill the worker since the error was on the host side.
+				// Return `Concluded`, but do not kill the worker since the error was on the host
+				// side.
 				Outcome::RenameTmpFileErr { worker: idle, result: _, err } =>
 					handle_concluded_no_rip(
 						from_pool,
diff --git a/node/core/pvf/src/prepare/queue.rs b/node/core/pvf/src/prepare/queue.rs
index 5e19a4c7217a..c38012d74548 100644
--- a/node/core/pvf/src/prepare/queue.rs
+++ b/node/core/pvf/src/prepare/queue.rs
@@ -96,8 +96,9 @@ impl WorkerData {
 	}
 }
 
-/// A queue structured like this is prone to starving, however, we don't care that much since we expect
-/// there is going to be a limited number of critical jobs and we don't really care if background starve.
+/// A queue structured like this is prone to starving, however, we don't care that much since we
+/// expect there is going to be a limited number of critical jobs and we don't really care if
+/// background jobs starve.
 #[derive(Default)]
 struct Unscheduled {
 	normal: VecDeque<Job>,
diff --git a/node/core/pvf/src/prepare/worker_intf.rs b/node/core/pvf/src/prepare/worker_intf.rs
index d0d9a026dda7..5280ab6b42a2 100644
--- a/node/core/pvf/src/prepare/worker_intf.rs
+++ b/node/core/pvf/src/prepare/worker_intf.rs
@@ -247,8 +247,8 @@ where
 
 	let outcome = f(tmp_file.clone(), stream).await;
 
-	// The function called above is expected to move `tmp_file` to a new location upon success. However,
-	// the function may as well fail and in that case we should remove the tmp file here.
+	// The function called above is expected to move `tmp_file` to a new location upon success.
+	// However, the function may as well fail and in that case we should remove the tmp file here.
 	//
 	// In any case, we try to remove the file here so that there are no leftovers. We only report
 	// errors that are different from the `NotFound`.
diff --git a/node/core/pvf/src/worker_intf.rs b/node/core/pvf/src/worker_intf.rs
index ef5733ec0e6d..795ad4524443 100644
--- a/node/core/pvf/src/worker_intf.rs
+++ b/node/core/pvf/src/worker_intf.rs
@@ -196,13 +196,15 @@ pub enum SpawnErr {
 	Handshake,
 }
 
-/// This is a representation of a potentially running worker. Drop it and the process will be killed.
+/// This is a representation of a potentially running worker. Drop it and the process will be
+/// killed.
 ///
 /// A worker's handle is also a future that resolves when it's detected that the worker's process
 /// has been terminated. Since the worker is running in another process it is obviously not
 /// necessary to poll this future to make the worker run, it's only for termination detection.
 ///
-/// This future relies on the fact that a child process's stdout `fd` is closed upon it's termination.
+/// This future relies on the fact that a child process's stdout `fd` is closed upon its
+/// termination.
 #[pin_project]
 pub struct WorkerHandle {
 	child: process::Child,
@@ -240,15 +242,15 @@ impl WorkerHandle {
 			child_id,
 			stdout,
 			program: program.as_ref().to_path_buf(),
-			// We don't expect the bytes to be ever read. But in case we do, we should not use a buffer
-			// of a small size, because otherwise if the child process does return any data we will end up
-			// issuing a syscall for each byte. We also prefer not to do allocate that on the stack, since
-			// each poll the buffer will be allocated and initialized (and that's due `poll_read` takes &mut [u8]
-			// and there are no guarantees that a `poll_read` won't ever read from there even though that's
-			// unlikely).
+			// We don't expect the bytes to be ever read. But in case we do, we should not use a
+			// buffer of a small size, because otherwise if the child process does return any data
+			// we will end up issuing a syscall for each byte. We also prefer not to allocate
+			// that on the stack, since each poll the buffer will be allocated and initialized (and
+			// that's due `poll_read` takes &mut [u8] and there are no guarantees that a `poll_read`
+			// won't ever read from there even though that's unlikely).
 			//
-			// OTOH, we also don't want to be super smart here and we could just afford to allocate a buffer
-			// for that here.
+			// OTOH, we also don't want to be super smart here and we could just afford to allocate
+			// a buffer for that here.
 			drop_box: vec![0; 8192].into_boxed_slice(),
 		})
 	}
@@ -280,8 +282,8 @@ impl futures::Future for WorkerHandle {
 				}
 			},
 			Err(err) => {
-				// The implementation is guaranteed to not to return `WouldBlock` and Interrupted. This
-				// leaves us with legit errors which we suppose were due to termination.
+				// The implementation is guaranteed not to return `WouldBlock` and Interrupted.
+				// This leaves us with legit errors which we suppose were due to termination.
 
 				// Log the status code.
 				gum::debug!(
diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs
index 252bb21b0edb..0ee5ca24ceee 100644
--- a/node/core/runtime-api/src/lib.rs
+++ b/node/core/runtime-api/src/lib.rs
@@ -321,7 +321,8 @@ where
 			return futures::pending!()
 		}
 
-		// If there are active requests, this will always resolve to `Some(_)` when a request is finished.
+		// If there are active requests, this will always resolve to `Some(_)` when a request is
+		// finished.
 		if let Some(Ok(Some(result))) = self.active_requests.next().await {
 			self.store_cache(result);
 		}
@@ -343,10 +344,10 @@ where
 {
 	loop {
 		// Let's add some back pressure when the subsystem is running at `MAX_PARALLEL_REQUESTS`.
-		// This can never block forever, because `active_requests` is owned by this task and any mutations
-		// happen either in `poll_requests` or `spawn_request` - so if `is_busy` returns true, then
-		// even if all of the requests finish before us calling `poll_requests` the `active_requests` length
-		// remains invariant.
+		// This can never block forever, because `active_requests` is owned by this task and any
+		// mutations happen either in `poll_requests` or `spawn_request` - so if `is_busy` returns
+		// true, then even if all of the requests finish before us calling `poll_requests` the
+		// `active_requests` length remains invariant.
 		if subsystem.is_busy() {
 			// Since we are not using any internal waiting queues, we need to wait for exactly
 			// one request to complete before we can read the next one from the overseer channel.
diff --git a/node/core/runtime-api/src/tests.rs b/node/core/runtime-api/src/tests.rs
index 27090a102ec2..33f5eef3869f 100644
--- a/node/core/runtime-api/src/tests.rs
+++ b/node/core/runtime-api/src/tests.rs
@@ -895,7 +895,8 @@ fn multiple_requests_in_parallel_are_working() {
 			receivers.push(rx);
 		}
 
-		// The backpressure from reaching `MAX_PARALLEL_REQUESTS` will make the test block, we need to drop the lock.
+		// The backpressure from reaching `MAX_PARALLEL_REQUESTS` will make the test block, we need
+		// to drop the lock.
 		drop(lock);
 
 		for _ in 0..MAX_PARALLEL_REQUESTS * 100 {
diff --git a/node/gum/src/lib.rs b/node/gum/src/lib.rs
index e989a15ae4e3..1cc4d8dec1cb 100644
--- a/node/gum/src/lib.rs
+++ b/node/gum/src/lib.rs
@@ -67,14 +67,13 @@
 //!
 //! Here's the rundown on how fields work:
 //!
-//! - Fields on spans and events are specified using the `syntax field_name =
-//!   field_value`.
-//! - Local variables may be used as field values without an assignment, similar to
-//!   struct initializers.
-//! - The `?` sigil is shorthand that specifies a field should be recorded using its
-//!   `fmt::Debug` implementation.
-//! - The `%` sigil operates similarly, but indicates that the value should be
-//!   recorded using its `fmt::Display` implementation.
+//! - Fields on spans and events are specified using the `syntax field_name = field_value`.
+//! - Local variables may be used as field values without an assignment, similar to struct
+//!   initializers.
+//! - The `?` sigil is shorthand that specifies a field should be recorded using its `fmt::Debug`
+//!   implementation.
+//! - The `%` sigil operates similarly, but indicates that the value should be recorded using its
+//!   `fmt::Display` implementation.
 //!
 //! For full details, again see [the tracing
 //! docs](https://docs.rs/tracing/latest/tracing/index.html#recording-fields).
diff --git a/node/jaeger/src/lib.rs b/node/jaeger/src/lib.rs
index 99222589d4ab..7de458606816 100644
--- a/node/jaeger/src/lib.rs
+++ b/node/jaeger/src/lib.rs
@@ -132,7 +132,8 @@ impl Jaeger {
 				match tokio::net::UdpSocket::bind("0.0.0.0:0").await {
 					Ok(udp_socket) => loop {
 						let buf = traces_out.next().await;
-						// UDP sending errors happen only either if the API is misused or in case of missing privilege.
+						// UDP sending errors happen only either if the API is misused or in case of
+						// missing privilege.
 						if let Err(e) = udp_socket.send_to(&buf, jaeger_agent).await {
 							log::debug!(target: "jaeger", "UDP send error: {}", e);
 						}
diff --git a/node/jaeger/src/spans.rs b/node/jaeger/src/spans.rs
index be8bf9cd5ddc..4038d41344f2 100644
--- a/node/jaeger/src/spans.rs
+++ b/node/jaeger/src/spans.rs
@@ -110,8 +110,8 @@ impl PerLeafSpan {
 	/// Creates a new instance.
 	///
 	/// Takes the `leaf_span` that is created by the overseer per leaf and a name for a child span.
-	/// Both will be stored in this object, while the child span is implicitly accessible by using the
-	/// [`Deref`](std::ops::Deref) implementation.
+	/// Both will be stored in this object, while the child span is implicitly accessible by using
+	/// the [`Deref`](std::ops::Deref) implementation.
 	pub fn new(leaf_span: Arc<Span>, name: &'static str) -> Self {
 		let span = leaf_span.child(name);
 
diff --git a/node/malus/src/variants/common.rs b/node/malus/src/variants/common.rs
index 4ea8b88b56a5..ab1dfbbb360a 100644
--- a/node/malus/src/variants/common.rs
+++ b/node/malus/src/variants/common.rs
@@ -125,8 +125,8 @@ where
 		Self { fake_validation, fake_validation_error, distribution, spawner }
 	}
 
-	/// Creates and sends the validation response for a given candidate. Queries the runtime to obtain the validation data for the
-	/// given candidate.
+	/// Creates and sends the validation response for a given candidate. Queries the runtime to
+	/// obtain the validation data for the given candidate.
 	pub fn send_validation_response<Sender>(
 		&self,
 		candidate_descriptor: CandidateDescriptor,
@@ -203,7 +203,8 @@ where
 {
 	type Message = CandidateValidationMessage;
 
-	// Capture all (approval and backing) candidate validation requests and depending on configuration fail them.
+	// Capture all (approval and backing) candidate validation requests and depending on
+	// configuration fail them.
 	fn intercept_incoming(
 		&self,
 		subsystem_sender: &mut Sender,
@@ -279,7 +280,8 @@ where
 					},
 					FakeCandidateValidation::ApprovalInvalid |
 					FakeCandidateValidation::BackingAndApprovalInvalid => {
-						// Set the validation result to invalid with probability `p` and trigger a dispute
+						// Set the validation result to invalid with probability `p` and trigger a
+						// dispute
 						let behave_maliciously = self.distribution.sample(&mut rand::thread_rng());
 						match behave_maliciously {
 							true => {
@@ -294,7 +296,8 @@ where
 									&validation_result,
 								);
 
-								// We're not even checking the candidate, this makes us appear faster than honest validators.
+								// We're not even checking the candidate, this makes us appear
+								// faster than honest validators.
 								sender.send(Ok(validation_result)).unwrap();
 								None
 							},
@@ -370,7 +373,8 @@ where
 								);
 								None
 							},
-							// If the `PoV` is malicious, we behave normally with some probability `(1-p)`
+							// If the `PoV` is malicious, we behave normally with some probability
+							// `(1-p)`
 							false => Some(FromOrchestra::Communication {
 								msg: CandidateValidationMessage::ValidateFromChainState(
 									candidate_receipt,
@@ -383,7 +387,8 @@ where
 					},
 					FakeCandidateValidation::BackingInvalid |
 					FakeCandidateValidation::BackingAndApprovalInvalid => {
-						// Maliciously set the validation result to invalid for a valid candidate with probability `p`
+						// Maliciously set the validation result to invalid for a valid candidate
+						// with probability `p`
 						let behave_maliciously = self.distribution.sample(&mut rand::thread_rng());
 						match behave_maliciously {
 							true => {
@@ -396,7 +401,8 @@ where
 									"😈 Maliciously sending invalid validation result: {:?}.",
 									&validation_result,
 								);
-								// We're not even checking the candidate, this makes us appear faster than honest validators.
+								// We're not even checking the candidate, this makes us appear
+								// faster than honest validators.
 								response_sender.send(Ok(validation_result)).unwrap();
 								None
 							},
diff --git a/node/malus/src/variants/dispute_valid_candidates.rs b/node/malus/src/variants/dispute_valid_candidates.rs
index ab1fba478beb..9ea8449a1d0b 100644
--- a/node/malus/src/variants/dispute_valid_candidates.rs
+++ b/node/malus/src/variants/dispute_valid_candidates.rs
@@ -45,14 +45,15 @@ use std::sync::Arc;
 #[command(rename_all = "kebab-case")]
 #[allow(missing_docs)]
 pub struct DisputeAncestorOptions {
-	/// Malicious candidate validation subsystem configuration. When enabled, node PVF execution is skipped
-	/// during backing and/or approval and it's result can by specified by this option and `--fake-validation-error`
-	/// for invalid candidate outcomes.
+	/// Malicious candidate validation subsystem configuration. When enabled, node PVF execution is
+	/// skipped during backing and/or approval and its result can be specified by this option and
+	/// `--fake-validation-error` for invalid candidate outcomes.
 	#[arg(long, value_enum, ignore_case = true, default_value_t = FakeCandidateValidation::BackingAndApprovalInvalid)]
 	pub fake_validation: FakeCandidateValidation,
 
-	/// Applies only when `--fake-validation` is configured to reject candidates as invalid. It allows
-	/// to specify the exact error to return from the malicious candidate validation subsystem.
+	/// Applies only when `--fake-validation` is configured to reject candidates as invalid. It
+	/// allows specifying the exact error to return from the malicious candidate validation
+	/// subsystem.
 	#[arg(long, value_enum, ignore_case = true, default_value_t = FakeCandidateValidationError::InvalidOutputs)]
 	pub fake_validation_error: FakeCandidateValidationError,
 
diff --git a/node/malus/src/variants/suggest_garbage_candidate.rs b/node/malus/src/variants/suggest_garbage_candidate.rs
index 9fd8f6473bde..7d301c194b44 100644
--- a/node/malus/src/variants/suggest_garbage_candidate.rs
+++ b/node/malus/src/variants/suggest_garbage_candidate.rs
@@ -88,14 +88,15 @@ where
 					"Received request to second candidate",
 				);
 
-				// Need to draw value from Bernoulli distribution with given probability of success defined by the clap parameter.
-				// Note that clap parameter must be f64 since this is expected by the Bernoulli::new() function.
-				// It must be converted from u8, due to the lack of support for the .range() call on u64 in the clap crate.
+				// Need to draw value from Bernoulli distribution with given probability of success
+				// defined by the clap parameter. Note that clap parameter must be f64 since this is
+				// expected by the Bernoulli::new() function. It must be converted from u8, due to
+				// the lack of support for the .range() call on u64 in the clap crate.
 				let distribution = Bernoulli::new(self.percentage / 100.0)
 					.expect("Invalid probability! Percentage must be in range [0..=100].");
 
-				// Draw a random boolean from the Bernoulli distribution with probability of true equal to `p`.
-				// We use `rand::thread_rng` as the source of randomness.
+				// Draw a random boolean from the Bernoulli distribution with probability of true
+				// equal to `p`. We use `rand::thread_rng` as the source of randomness.
 				let generate_malicious_candidate = distribution.sample(&mut rand::thread_rng());
 
 				if generate_malicious_candidate == true {
diff --git a/node/metrics/src/lib.rs b/node/metrics/src/lib.rs
index 69b3771d696a..9cb0f289a580 100644
--- a/node/metrics/src/lib.rs
+++ b/node/metrics/src/lib.rs
@@ -19,7 +19,8 @@
 //! Collects a bunch of metrics providers and related features such as
 //! `Metronome` for usage with metrics collections.
 //!
-//! This crate also reexports Prometheus metric types which are expected to be implemented by subsystems.
+//! This crate also reexports Prometheus metric types which are expected to be implemented by
+//! subsystems.
 
 #![deny(missing_docs)]
 #![deny(unused_imports)]
diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs
index bc85f54177cb..803a56251495 100644
--- a/node/network/approval-distribution/src/lib.rs
+++ b/node/network/approval-distribution/src/lib.rs
@@ -102,11 +102,13 @@ impl RecentlyOutdated {
 // Aggression has 3 levels:
 //
 //  * Aggression Level 0: The basic behaviors described above.
-//  * Aggression Level 1: The originator of a message sends to all peers. Other peers follow the rules above.
-//  * Aggression Level 2: All peers send all messages to all their row and column neighbors.
-//    This means that each validator will, on average, receive each message approximately `2*sqrt(n)` times.
-// The aggression level of messages pertaining to a block increases when that block is unfinalized and
-// is a child of the finalized block.
+//  * Aggression Level 1: The originator of a message sends to all peers. Other peers follow the
+//    rules above.
+//  * Aggression Level 2: All peers send all messages to all their row and column neighbors. This
+//    means that each validator will, on average, receive each message approximately `2*sqrt(n)`
+//    times.
+// The aggression level of messages pertaining to a block increases when that block is unfinalized
+// and is a child of the finalized block.
 // This means that only one block at a time has its messages propagated with aggression > 0.
 //
 // A note on aggression thresholds: changes in propagation apply only to blocks which are the
@@ -120,7 +122,8 @@ impl RecentlyOutdated {
 struct AggressionConfig {
 	/// Aggression level 1: all validators send all their own messages to all peers.
 	l1_threshold: Option<BlockNumber>,
-	/// Aggression level 2: level 1 + all validators send all messages to all peers in the X and Y dimensions.
+	/// Aggression level 2: level 1 + all validators send all messages to all peers in the X and Y
+	/// dimensions.
 	l2_threshold: Option<BlockNumber>,
 	/// How often to re-send messages to all targeted recipients.
 	/// This applies to all unfinalized blocks.
@@ -167,11 +170,12 @@ struct State {
 	blocks: HashMap<Hash, BlockEntry>,
 
 	/// Our view updates to our peers can race with `NewBlocks` updates. We store messages received
-	/// against the directly mentioned blocks in our view in this map until `NewBlocks` is received.
+	/// against the directly mentioned blocks in our view in this map until `NewBlocks` is
+	/// received.
 	///
-	/// As long as the parent is already in the `blocks` map and `NewBlocks` messages aren't delayed
-	/// by more than a block length, this strategy will work well for mitigating the race. This is
-	/// also a race that occurs typically on local networks.
+	/// As long as the parent is already in the `blocks` map and `NewBlocks` messages aren't
+	/// delayed by more than a block length, this strategy will work well for mitigating the race.
+	/// This is also a race that occurs typically on local networks.
 	pending_known: HashMap<Hash, Vec<(PeerId, PendingMessage)>>,
 
 	/// Peer data is partially stored here, and partially inline within the [`BlockEntry`]s
@@ -947,7 +951,8 @@ impl State {
 			}
 		}
 
-		// Invariant: to our knowledge, none of the peers except for the `source` know about the assignment.
+		// Invariant: to our knowledge, none of the peers except for the `source` know about the
+		// assignment.
 		metrics.on_assignment_imported();
 
 		let topology = self.topologies.get_topology(entry.session);
@@ -1239,7 +1244,8 @@ impl State {
 			}
 		}
 
-		// Invariant: to our knowledge, none of the peers except for the `source` know about the approval.
+		// Invariant: to our knowledge, none of the peers except for the `source` know about the
+		// approval.
 		metrics.on_approval_imported();
 
 		let required_routing = match entry.candidates.get_mut(candidate_index as usize) {
@@ -1925,9 +1931,9 @@ const fn ensure_size_not_zero(size: usize) -> usize {
 }
 
 /// The maximum amount of assignments per batch is 33% of maximum allowed by protocol.
-/// This is an arbitrary value. Bumping this up increases the maximum amount of approvals or assignments
-/// we send in a single message to peers. Exceeding `MAX_NOTIFICATION_SIZE` will violate the protocol
-/// configuration.
+/// This is an arbitrary value. Bumping this up increases the maximum amount of approvals or
+/// assignments we send in a single message to peers. Exceeding `MAX_NOTIFICATION_SIZE` will violate
+/// the protocol configuration.
 pub const MAX_ASSIGNMENT_BATCH_SIZE: usize = ensure_size_not_zero(
 	MAX_NOTIFICATION_SIZE as usize /
 		std::mem::size_of::<(IndirectAssignmentCert, CandidateIndex)>() /
diff --git a/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/node/network/availability-distribution/src/requester/fetch_task/mod.rs
index f87e1888bb10..191ee2acd973 100644
--- a/node/network/availability-distribution/src/requester/fetch_task/mod.rs
+++ b/node/network/availability-distribution/src/requester/fetch_task/mod.rs
@@ -315,7 +315,8 @@ impl RunningTask {
 					continue
 				},
 			};
-			// We drop the span so that the span is not active whilst we validate and store the chunk.
+			// We drop the span so that the span is not active whilst we validate and store the
+			// chunk.
 			drop(_chunk_recombine_span);
 			let _chunk_validate_and_store_span = span
 				.child("validate-and-store-chunk")
diff --git a/node/network/availability-distribution/src/requester/mod.rs b/node/network/availability-distribution/src/requester/mod.rs
index e27f40982ae8..446988f7cc0d 100644
--- a/node/network/availability-distribution/src/requester/mod.rs
+++ b/node/network/availability-distribution/src/requester/mod.rs
@@ -114,8 +114,8 @@ impl Requester {
 				.with_string_tag("leaf", format!("{:?}", leaf.hash))
 				.with_stage(jaeger::Stage::AvailabilityDistribution);
 
-			// Order important! We need to handle activated, prior to deactivated, otherwise we might
-			// cancel still needed jobs.
+			// Order important! We need to handle activated, prior to deactivated, otherwise we
+			// might cancel still needed jobs.
 			self.start_requesting_chunks(ctx, runtime, leaf, &span).await?;
 		}
 
@@ -168,8 +168,8 @@ impl Requester {
 			// any tasks separately.
 			//
 			// The next time the subsystem receives leaf update, some of spawned task will be bumped
-			// to be live in fresh relay parent, while some might get dropped due to the current leaf
-			// being deactivated.
+			// to be live in fresh relay parent, while some might get dropped due to the current
+			// leaf being deactivated.
 			self.add_cores(ctx, runtime, leaf, leaf_session_index, cores, span).await?;
 		}
 
@@ -177,7 +177,6 @@ impl Requester {
 	}
 
 	/// Stop requesting chunks for obsolete heads.
-	///
 	fn stop_requesting_chunks(&mut self, obsolete_leaves: impl Iterator<Item = Hash>) {
 		let obsolete_leaves: HashSet<_> = obsolete_leaves.collect();
 		self.fetches.retain(|_, task| {
@@ -226,10 +225,10 @@ impl Requester {
 						.with_session_info(
 							context,
 							runtime,
-							// We use leaf here, the relay_parent must be in the same session as the
-							// leaf. This is guaranteed by runtime which ensures that cores are cleared
-							// at session boundaries. At the same time, only leaves are guaranteed to
-							// be fetchable by the state trie.
+							// We use leaf here, the relay_parent must be in the same session as
+							// the leaf. This is guaranteed by runtime which ensures that cores are
+							// cleared at session boundaries. At the same time, only leaves are
+							// guaranteed to be fetchable by the state trie.
 							leaf,
 							leaf_session_index,
 							|info| FetchTaskConfig::new(leaf, &core, tx, metrics, info, span),
diff --git a/node/network/availability-recovery/src/futures_undead.rs b/node/network/availability-recovery/src/futures_undead.rs
index 225f6693a725..04ef3e749399 100644
--- a/node/network/availability-recovery/src/futures_undead.rs
+++ b/node/network/availability-recovery/src/futures_undead.rs
@@ -23,7 +23,6 @@
 //! was almost done, thus we would have wasted time with our impatience. By simply making them
 //! not count towards length, we can make sure to have enough "live" requests ongoing, while at the
 //! same time taking advantage of some maybe "late" response from the undead.
-//!
 
 use std::{
 	pin::Pin,
diff --git a/node/network/availability-recovery/src/lib.rs b/node/network/availability-recovery/src/lib.rs
index e8503ee454a2..fb0cdb720571 100644
--- a/node/network/availability-recovery/src/lib.rs
+++ b/node/network/availability-recovery/src/lib.rs
@@ -111,7 +111,8 @@ const SMALL_POV_LIMIT: usize = 128 * 1024;
 pub enum RecoveryStrategy {
 	/// We always try the backing group first, then fallback to validator chunks.
 	BackersFirstAlways,
-	/// We try the backing group first if PoV size is lower than specified, then fallback to validator chunks.
+	/// We try the backing group first if PoV size is lower than specified, then fallback to
+	/// validator chunks.
 	BackersFirstIfSizeLower(usize),
 	/// We always recover using validator chunks.
 	ChunksAlways,
@@ -132,7 +133,8 @@ impl RecoveryStrategy {
 		}
 	}
 
-	/// Returns the PoV size limit in bytes for `BackersFirstIfSizeLower` strategy, otherwise `None`.
+	/// Returns the PoV size limit in bytes for `BackersFirstIfSizeLower` strategy, otherwise
+	/// `None`.
 	pub fn pov_size_limit(&self) -> Option<usize> {
 		match *self {
 			RecoveryStrategy::BackersFirstIfSizeLower(limit) => Some(limit),
@@ -165,8 +167,8 @@ struct RequestChunksFromValidators {
 	///
 	/// including failed ones.
 	total_received_responses: usize,
-	/// a random shuffling of the validators which indicates the order in which we connect to the validators and
-	/// request the chunk from them.
+	/// A random shuffling of the validators which indicates the order in which we connect to the
+	/// validators and request the chunk from them.
 	shuffling: VecDeque<ValidatorIndex>,
 	/// Chunks received so far.
 	received_chunks: HashMap<ValidatorIndex, ErasureChunk>,
@@ -215,7 +217,8 @@ enum ErasureTask {
 		HashMap<ValidatorIndex, ErasureChunk>,
 		oneshot::Sender<Result<AvailableData, ErasureEncodingError>>,
 	),
-	/// Re-encode `AvailableData` into erasure chunks in order to verify the provided root hash of the Merkle tree.
+	/// Re-encode `AvailableData` into erasure chunks in order to verify the provided root hash of
+	/// the Merkle tree.
 	Reencode(usize, Hash, AvailableData, oneshot::Sender<Option<AvailableData>>),
 }
 
@@ -808,8 +811,8 @@ where
 		self.params.metrics.on_recovery_started();
 
 		loop {
-			// These only fail if we cannot reach the underlying subsystem, which case there is nothing
-			// meaningful we can do.
+			// These only fail if we cannot reach the underlying subsystem, in which case there is
+			// nothing meaningful we can do.
 			match self.source {
 				Source::RequestFromBackers(ref mut from_backers) => {
 					match from_backers.run(&self.params, &mut self.sender).await {
@@ -1008,7 +1011,8 @@ async fn launch_recovery_task<Context>(
 			);
 
 			backing_group = backing_group.filter(|_| {
-				// We keep the backing group only if `1/3` of chunks sum up to less than `small_pov_limit`.
+				// We keep the backing group only if `1/3` of chunks sum up to less than
+				// `small_pov_limit`.
 				prefer_backing_group
 			});
 		}
@@ -1194,18 +1198,21 @@ impl AvailabilityRecoverySubsystem {
 		let (erasure_task_tx, erasure_task_rx) = futures::channel::mpsc::channel(16);
 		let mut erasure_task_rx = erasure_task_rx.fuse();
 
-		// `ThreadPoolBuilder` spawns the tasks using `spawn_blocking`. For each worker there will be a `mpsc` channel created.
-		// Each of these workers take the `Receiver` and poll it in an infinite loop.
-		// All of the sender ends of the channel are sent as a vec which we then use to create a `Cycle` iterator.
-		// We use this iterator to assign work in a round-robin fashion to the workers in the pool.
+		// `ThreadPoolBuilder` spawns the tasks using `spawn_blocking`. For each worker there will
+		// be a `mpsc` channel created. Each of these workers takes the `Receiver` and polls it in an
+		// infinite loop. All of the sender ends of the channel are sent as a vec which we then use
+		// to create a `Cycle` iterator. We use this iterator to assign work in a round-robin
+		// fashion to the workers in the pool.
 		//
 		// How work is dispatched to the pool from the recovery tasks:
-		// - Once a recovery task finishes retrieving the availability data, it needs to reconstruct from chunks and/or
+		// - Once a recovery task finishes retrieving the availability data, it needs to reconstruct
+		//   from chunks and/or
 		// re-encode the data which are heavy CPU computations.
-		// To do so it sends an `ErasureTask` to the main loop via the `erasure_task` channel, and waits for the results
-		// over a `oneshot` channel.
+		// To do so it sends an `ErasureTask` to the main loop via the `erasure_task` channel, and
+		// waits for the results over a `oneshot` channel.
 		// - In the subsystem main loop we poll the `erasure_task_rx` receiver.
-		// - We forward the received `ErasureTask` to the `next()` sender yielded by the `Cycle` iterator.
+		// - We forward the received `ErasureTask` to the `next()` sender yielded by the `Cycle`
+		//   iterator.
 		// - Some worker thread handles it and sends the response over the `oneshot` channel.
 
 		// Create a thread pool with 2 workers.
@@ -1348,11 +1355,13 @@ impl ThreadPoolBuilder {
 	// Creates a pool of `size` workers, where 1 <= `size` <= `MAX_THREADS`.
 	//
 	// Each worker is created by `spawn_blocking` and takes the receiver side of a channel
-	// while all of the senders are returned to the caller. Each worker runs `erasure_task_thread` that
-	// polls the `Receiver` for an `ErasureTask` which is expected to be CPU intensive. The larger
-	// the input (more or larger chunks/availability data), the more CPU cycles will be spent.
+	// while all of the senders are returned to the caller. Each worker runs `erasure_task_thread`
+	// that polls the `Receiver` for an `ErasureTask` which is expected to be CPU intensive. The
+	// larger the input (more or larger chunks/availability data), the more CPU cycles will be
+	// spent.
 	//
-	// For example, for 32KB PoVs, we'd expect re-encode to eat as much as 90ms and 500ms for 2.5MiB.
+	// For example, for 32KB PoVs, we'd expect re-encode to eat as much as 90ms and 500ms for
+	// 2.5MiB.
 	//
 	// After executing such a task, the worker sends the response via a provided `oneshot` sender.
 	//
diff --git a/node/network/availability-recovery/src/tests.rs b/node/network/availability-recovery/src/tests.rs
index 26a99e91a5e2..c5647a12f589 100644
--- a/node/network/availability-recovery/src/tests.rs
+++ b/node/network/availability-recovery/src/tests.rs
@@ -817,7 +817,8 @@ fn wrong_chunk_index_leads_to_recovery_error() {
 
 		let candidate_hash = test_state.candidate.hash();
 
-		// These chunks should fail the index check as they don't have the correct index for validator.
+		// These chunks should fail the index check as they don't have the correct index for
+		// validator.
 		test_state.chunks[1] = test_state.chunks[0].clone();
 		test_state.chunks[2] = test_state.chunks[0].clone();
 		test_state.chunks[3] = test_state.chunks[0].clone();
diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs
index 11a2dc6be83a..950bb3d6e6da 100644
--- a/node/network/bridge/src/rx/mod.rs
+++ b/node/network/bridge/src/rx/mod.rs
@@ -14,7 +14,8 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-//! The Network Bridge Subsystem - handles _incoming_ messages from the network, forwarded to the relevant subsystems.
+//! The Network Bridge Subsystem - handles _incoming_ messages from the network, forwarded to the
+//! relevant subsystems.
 use super::*;
 
 use always_assert::never;
@@ -86,7 +87,8 @@ pub struct NetworkBridgeRx<N, AD> {
 }
 
 impl<N, AD> NetworkBridgeRx<N, AD> {
-	/// Create a new network bridge subsystem with underlying network service and authority discovery service.
+	/// Create a new network bridge subsystem with underlying network service and authority
+	/// discovery service.
 	///
 	/// This assumes that the network service has had the notifications protocol for the network
 	/// bridge already registered. See [`peers_sets_info`](peers_sets_info).
diff --git a/node/network/bridge/src/rx/tests.rs b/node/network/bridge/src/rx/tests.rs
index 078f6591ae2a..e18a7e541832 100644
--- a/node/network/bridge/src/rx/tests.rs
+++ b/node/network/bridge/src/rx/tests.rs
@@ -795,8 +795,9 @@ fn peer_messages_sent_via_overseer() {
 
 		network_handle.disconnect_peer(peer.clone(), PeerSet::Validation).await;
 
-		// Approval distribution message comes first, and the message is only sent to that subsystem.
-		// then a disconnection event arises that is sent to all validation networking subsystems.
+		// Approval distribution message comes first, and the message is only sent to that
+		// subsystem. Then a disconnection event arises that is sent to all validation networking
+		// subsystems.
 
 		assert_matches!(
 			virtual_overseer.recv().await,
diff --git a/node/network/bridge/src/tx/mod.rs b/node/network/bridge/src/tx/mod.rs
index 2b54f6f0f06d..93916dd70fec 100644
--- a/node/network/bridge/src/tx/mod.rs
+++ b/node/network/bridge/src/tx/mod.rs
@@ -61,7 +61,8 @@ pub struct NetworkBridgeTx<N, AD> {
 }
 
 impl<N, AD> NetworkBridgeTx<N, AD> {
-	/// Create a new network bridge subsystem with underlying network service and authority discovery service.
+	/// Create a new network bridge subsystem with underlying network service and authority
+	/// discovery service.
 	///
 	/// This assumes that the network service has had the notifications protocol for the network
 	/// bridge already registered. See [`peers_sets_info`](peers_sets_info).
diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs
index 098416c5b88d..d4d1df3da467 100644
--- a/node/network/bridge/src/validator_discovery.rs
+++ b/node/network/bridge/src/validator_discovery.rs
@@ -106,9 +106,10 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
 	/// It will ask the network to connect to the validators and not disconnect
 	/// from them at least until the next request is issued for the same peer set.
 	///
-	/// This method will also disconnect from previously connected validators not in the `validator_ids` set.
-	/// it takes `network_service` and `authority_discovery_service` by value
-	/// and returns them as a workaround for the Future: Send requirement imposed by async function implementation.
+	/// This method will also disconnect from previously connected validators not in the
+	/// `validator_ids` set. It takes `network_service` and `authority_discovery_service` by value
+	/// and returns them as a workaround for the Future: Send requirement imposed by async function
+	/// implementation.
 	pub async fn on_request(
 		&mut self,
 		validator_ids: Vec<AuthorityDiscoveryId>,
diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs
index 39b23c152cbb..e4adfdc9d941 100644
--- a/node/network/collator-protocol/src/collator_side/mod.rs
+++ b/node/network/collator-protocol/src/collator_side/mod.rs
@@ -225,8 +225,8 @@ struct State {
 	/// Our validator groups per active leaf.
 	our_validators_groups: HashMap<Hash, ValidatorGroup>,
 
-	/// The mapping from [`PeerId`] to [`HashSet<AuthorityDiscoveryId>`]. This is filled over time as we learn the [`PeerId`]'s
-	/// by `PeerConnected` events.
+	/// The mapping from [`PeerId`] to [`HashSet<AuthorityDiscoveryId>`]. This is filled over time
+	/// as we learn the [`PeerId`]'s by `PeerConnected` events.
 	peer_ids: HashMap<PeerId, HashSet<AuthorityDiscoveryId>>,
 
 	/// Tracks which validators we want to stay connected to.
@@ -241,8 +241,8 @@ struct State {
 
 	/// All collation fetching requests that are still waiting to be answered.
 	///
-	/// They are stored per relay parent, when our view changes and the relay parent moves out, we will cancel the fetch
-	/// request.
+	/// They are stored per relay parent, when our view changes and the relay parent moves out, we
+	/// will cancel the fetch request.
 	waiting_collation_fetches: HashMap<Hash, WaitingCollationFetches>,
 
 	/// Active collation fetches.
@@ -526,8 +526,8 @@ async fn connect_to_validators<Context>(
 
 /// Advertise collation to the given `peer`.
 ///
-/// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is
-/// set as validator for our para at the given `relay_parent`.
+/// This will only advertise a collation if there exists one for the given `relay_parent` and the
+/// given `peer` is set as validator for our para at the given `relay_parent`.
 #[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
 async fn advertise_collation<Context>(
 	ctx: &mut Context,
@@ -638,7 +638,8 @@ async fn process_msg<Context>(
 			);
 		},
 		NetworkBridgeUpdate(event) => {
-			// We should count only this shoulder in the histogram, as other shoulders are just introducing noise
+			// We should count only this shoulder in the histogram, as other shoulders are just
+			// introducing noise
 			let _ = state.metrics.time_process_msg();
 
 			if let Err(e) = handle_network_msg(ctx, runtime, state, event).await {
diff --git a/node/network/collator-protocol/src/collator_side/tests.rs b/node/network/collator-protocol/src/collator_side/tests.rs
index 757ef813a3d0..e406e5d869cc 100644
--- a/node/network/collator-protocol/src/collator_side/tests.rs
+++ b/node/network/collator-protocol/src/collator_side/tests.rs
@@ -160,8 +160,8 @@ impl TestState {
 
 	/// Generate a new relay parent and inform the subsystem about the new view.
 	///
-	/// If `merge_views == true` it means the subsystem will be informed that we are working on the old `relay_parent`
-	/// and the new one.
+	/// If `merge_views == true` it means the subsystem will be informed that we are working on the
+	/// old `relay_parent` and the new one.
 	async fn advance_to_new_round(
 		&mut self,
 		virtual_overseer: &mut VirtualOverseer,
@@ -901,7 +901,8 @@ fn collate_on_two_different_relay_chain_blocks() {
 
 			let old_relay_parent = test_state.relay_parent;
 
-			// Advance to a new round, while informing the subsystem that the old and the new relay parent are active.
+			// Advance to a new round, while informing the subsystem that the old and the new relay
+			// parent are active.
 			test_state.advance_to_new_round(virtual_overseer, true).await;
 
 			distribute_collation(virtual_overseer, &test_state, true).await;
@@ -1085,7 +1086,8 @@ where
 				.await
 				.unwrap();
 
-			// Keep the feedback channel alive because we need to use it to inform about the finished transfer.
+			// Keep the feedback channel alive because we need to use it to inform about the
+			// finished transfer.
 			let feedback_tx = assert_matches!(
 				rx.await,
 				Ok(full_response) => {
diff --git a/node/network/collator-protocol/src/collator_side/validators_buffer.rs b/node/network/collator-protocol/src/collator_side/validators_buffer.rs
index 851923a6d0d4..13ed3f66e0f1 100644
--- a/node/network/collator-protocol/src/collator_side/validators_buffer.rs
+++ b/node/network/collator-protocol/src/collator_side/validators_buffer.rs
@@ -23,9 +23,9 @@
 //! We keep a simple FIFO buffer of N validator groups and a bitvec for each advertisement,
 //! 1 indicating we want to be connected to i-th validator in a buffer, 0 otherwise.
 //!
-//! The bit is set to 1 for the whole **group** whenever it's inserted into the buffer. Given a relay
-//! parent, one can reset a bit back to 0 for particular **validator**. For example, if a collation
-//! was fetched or some timeout has been hit.
+//! The bit is set to 1 for the whole **group** whenever it's inserted into the buffer. Given a
+//! relay parent, one can reset a bit back to 0 for particular **validator**. For example, if a
+//! collation was fetched or some timeout has been hit.
 //!
 //! The bitwise OR over known advertisements gives us validators indices for connection request.
 
diff --git a/node/network/collator-protocol/src/validator_side/tests.rs b/node/network/collator-protocol/src/validator_side/tests.rs
index a2e92e8c78d2..47409e8d10f3 100644
--- a/node/network/collator-protocol/src/validator_side/tests.rs
+++ b/node/network/collator-protocol/src/validator_side/tests.rs
@@ -730,7 +730,8 @@ fn reject_connection_to_next_group() {
 	})
 }
 
-// Ensure that we fetch a second collation, after the first checked collation was found to be invalid.
+// Ensure that we fetch a second collation, after the first checked collation was found to be
+// invalid.
 #[test]
 fn fetch_next_collation_on_invalid_collation() {
 	let test_state = TestState::default();
diff --git a/node/network/dispute-distribution/src/lib.rs b/node/network/dispute-distribution/src/lib.rs
index a39f78358f44..ad99bc41fa64 100644
--- a/node/network/dispute-distribution/src/lib.rs
+++ b/node/network/dispute-distribution/src/lib.rs
@@ -60,8 +60,8 @@ use self::sender::{DisputeSender, DisputeSenderMessage};
 
 /// ## The receiver [`DisputesReceiver`]
 ///
-/// The receiving side is implemented as `DisputesReceiver` and is run as a separate long running task within
-/// this subsystem ([`DisputesReceiver::run`]).
+/// The receiving side is implemented as `DisputesReceiver` and is run as a separate long running
+/// task within this subsystem ([`DisputesReceiver::run`]).
 ///
 /// Conceptually all the receiver has to do, is waiting for incoming requests which are passed in
 /// via a dedicated channel and forwarding them to the dispute coordinator via
@@ -101,8 +101,8 @@ const LOG_TARGET: &'static str = "parachain::dispute-distribution";
 
 /// Rate limit on the `receiver` side.
 ///
-/// If messages from one peer come in at a higher rate than every `RECEIVE_RATE_LIMIT` on average, we
-/// start dropping messages from that peer to enforce that limit.
+/// If messages from one peer come in at a higher rate than every `RECEIVE_RATE_LIMIT` on average,
+/// we start dropping messages from that peer to enforce that limit.
 pub const RECEIVE_RATE_LIMIT: Duration = Duration::from_millis(100);
 
 /// Rate limit on the `sender` side.
diff --git a/node/network/dispute-distribution/src/receiver/batches/batch.rs b/node/network/dispute-distribution/src/receiver/batches/batch.rs
index 75f37107dff9..11380b7c072e 100644
--- a/node/network/dispute-distribution/src/receiver/batches/batch.rs
+++ b/node/network/dispute-distribution/src/receiver/batches/batch.rs
@@ -192,8 +192,8 @@ impl Batch {
 
 	/// Calculate when the next tick should happen.
 	///
-	/// This will usually return `now + BATCH_COLLECTING_INTERVAL`, except if the lifetime of this batch
-	/// would exceed `MAX_BATCH_LIFETIME`.
+	/// This will usually return `now + BATCH_COLLECTING_INTERVAL`, except if the lifetime of this
+	/// batch would exceed `MAX_BATCH_LIFETIME`.
 	///
 	/// # Arguments
 	///
diff --git a/node/network/dispute-distribution/src/receiver/batches/waiting_queue.rs b/node/network/dispute-distribution/src/receiver/batches/waiting_queue.rs
index 72f6e80a26a4..9a5e665a5756 100644
--- a/node/network/dispute-distribution/src/receiver/batches/waiting_queue.rs
+++ b/node/network/dispute-distribution/src/receiver/batches/waiting_queue.rs
@@ -50,8 +50,8 @@ impl<Payload: Eq + Ord> WaitingQueue<Payload> {
 
 	/// Push a `PendingWake`.
 	///
-	/// The next call to `wait_ready` will make sure to wake soon enough to process that new event in a
-	/// timely manner.
+	/// The next call to `wait_ready` will make sure to wake soon enough to process that new event
+	/// in a timely manner.
 	pub fn push(&mut self, wake: PendingWake<Payload>) {
 		self.pending_wakes.push(wake);
 		// Reset timer as it is potentially obsolete now:
diff --git a/node/network/dispute-distribution/src/receiver/mod.rs b/node/network/dispute-distribution/src/receiver/mod.rs
index ed108a67fac3..827a77281ccb 100644
--- a/node/network/dispute-distribution/src/receiver/mod.rs
+++ b/node/network/dispute-distribution/src/receiver/mod.rs
@@ -382,11 +382,11 @@ where
 				if let Err(pending_response) = batch_result {
 					// We don't expect honest peers to send redundant votes within a single batch,
 					// as the timeout for retry is much higher. Still we don't want to punish the
-					// node as it might not be the node's fault. Some other (malicious) node could have been
-					// faster sending the same votes in order to harm the reputation of that honest
-					// node. Given that we already have a rate limit, if a validator chooses to
-					// waste available rate with redundant votes - so be it. The actual dispute
-					// resolution is unaffected.
+					// node as it might not be the node's fault. Some other (malicious) node could
+					// have been faster sending the same votes in order to harm the reputation of
+					// that honest node. Given that we already have a rate limit, if a validator
+					// chooses to waste available rate with redundant votes - so be it. The actual
+					// dispute resolution is unaffected.
 					gum::debug!(
 						target: LOG_TARGET,
 						?peer,
diff --git a/node/network/dispute-distribution/src/sender/send_task.rs b/node/network/dispute-distribution/src/sender/send_task.rs
index fcd670ff9ce9..18c66066d162 100644
--- a/node/network/dispute-distribution/src/sender/send_task.rs
+++ b/node/network/dispute-distribution/src/sender/send_task.rs
@@ -45,8 +45,8 @@ use crate::{
 ///
 /// The unit of work for a `SendTask` is an authority/validator.
 pub struct SendTask<M> {
-	/// The request we are supposed to get out to all `parachain` validators of the dispute's session
-	/// and to all current authorities.
+	/// The request we are supposed to get out to all `parachain` validators of the dispute's
+	/// session and to all current authorities.
 	request: DisputeRequest,
 
 	/// The set of authorities we need to send our messages to. This set will change at session
@@ -185,7 +185,8 @@ impl<M: 'static + Send + Sync> SendTask<M> {
 
 	/// Handle a finished response waiting task.
 	///
-	/// Called by `DisputeSender` upon reception of the corresponding message from our spawned `wait_response_task`.
+	/// Called by `DisputeSender` upon reception of the corresponding message from our spawned
+	/// `wait_response_task`.
 	pub fn on_finished_send(&mut self, authority: &AuthorityDiscoveryId, result: TaskResult) {
 		match result {
 			TaskResult::Failed(err) => {
@@ -204,8 +205,8 @@ impl<M: 'static + Send + Sync> SendTask<M> {
 			TaskResult::Succeeded => {
 				let status = match self.deliveries.get_mut(&authority) {
 					None => {
-						// Can happen when a sending became irrelevant while the response was already
-						// queued.
+						// Can happen when a sending became irrelevant while the response was
+						// already queued.
 						gum::debug!(
 							target: LOG_TARGET,
 							candidate = ?self.request.0.candidate_receipt.hash(),
diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs
index 62a071aa6f4c..3c178ad9dfa5 100644
--- a/node/network/gossip-support/src/lib.rs
+++ b/node/network/gossip-support/src/lib.rs
@@ -246,7 +246,8 @@ where
 				{
 					let mut connections = authorities_past_present_future(sender, leaf).await?;
 
-					// Remove all of our locally controlled validator indices so we don't connect to ourself.
+					// Remove all of our locally controlled validator indices so we don't connect to
+					// ourselves.
 					let connections =
 						if remove_all_controlled(&self.keystore, &mut connections) != 0 {
 							connections
diff --git a/node/network/protocol/src/grid_topology.rs b/node/network/protocol/src/grid_topology.rs
index 1b356f67617b..99dd513c4d79 100644
--- a/node/network/protocol/src/grid_topology.rs
+++ b/node/network/protocol/src/grid_topology.rs
@@ -17,17 +17,20 @@
 //! Grid topology support implementation
 //! The basic operation of the 2D grid topology is that:
 //!   * A validator producing a message sends it to its row-neighbors and its column-neighbors
-//!   * A validator receiving a message originating from one of its row-neighbors sends it to its column-neighbors
-//!   * A validator receiving a message originating from one of its column-neighbors sends it to its row-neighbors
+//!   * A validator receiving a message originating from one of its row-neighbors sends it to its
+//!     column-neighbors
+//!   * A validator receiving a message originating from one of its column-neighbors sends it to its
+//!     row-neighbors
 //!
-//! This grid approach defines 2 unique paths for every validator to reach every other validator in at most 2 hops.
+//! This grid approach defines 2 unique paths for every validator to reach every other validator in
+//! at most 2 hops.
 //!
 //! However, we also supplement this with some degree of random propagation:
 //! every validator, upon seeing a message for the first time, propagates it to 8 random peers.
 //! This inserts some redundancy in case the grid topology isn't working or is being attacked -
 //! an adversary doesn't know which peers a validator will send to.
-//! This is combined with the property that the adversary doesn't know which validators will elect to check a block.
-//!
+//! This is combined with the property that the adversary doesn't know which validators will elect
+//! to check a block.
 
 use crate::PeerId;
 use polkadot_primitives::{AuthorityDiscoveryId, SessionIndex, ValidatorIndex};
@@ -188,7 +191,8 @@ impl GridNeighbors {
 			(false, false) => RequiredRouting::None,
 			(true, false) => RequiredRouting::GridY, // messages from X go to Y
 			(false, true) => RequiredRouting::GridX, // messages from Y go to X
-			(true, true) => RequiredRouting::GridXY, // if the grid works as expected, this shouldn't happen.
+			(true, true) => RequiredRouting::GridXY, /* if the grid works as expected, this
+			                                           * shouldn't happen. */
 		}
 	}
 
@@ -213,7 +217,8 @@ impl GridNeighbors {
 					"Grid topology is unexpected, play it safe and send to X AND Y"
 				);
 				RequiredRouting::GridXY
-			}, // if the grid works as expected, this shouldn't happen.
+			}, /* if the grid works as expected, this
+			    * shouldn't happen. */
 		}
 	}
 
diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs
index 948c422a82f8..2df926ac55d8 100644
--- a/node/network/protocol/src/lib.rs
+++ b/node/network/protocol/src/lib.rs
@@ -91,7 +91,8 @@ impl Into<sc_network::ObservedRole> for ObservedRole {
 
 /// Specialized wrapper around [`View`].
 ///
-/// Besides the access to the view itself, it also gives access to the [`jaeger::Span`] per leave/head.
+/// Besides the access to the view itself, it also gives access to the [`jaeger::Span`] per
+/// leave/head.
 #[derive(Debug, Clone, Default)]
 pub struct OurView {
 	view: View,
@@ -131,7 +132,8 @@ impl std::ops::Deref for OurView {
 	}
 }
 
-/// Construct a new [`OurView`] with the given chain heads, finalized number 0 and disabled [`jaeger::Span`]'s.
+/// Construct a new [`OurView`] with the given chain heads, finalized number 0 and disabled
+/// [`jaeger::Span`]'s.
 ///
 /// NOTE: Use for tests only.
 ///
diff --git a/node/network/protocol/src/peer_set.rs b/node/network/protocol/src/peer_set.rs
index ce47ac30811a..b9fa80d5c4a2 100644
--- a/node/network/protocol/src/peer_set.rs
+++ b/node/network/protocol/src/peer_set.rs
@@ -98,7 +98,8 @@ impl PeerSet {
 				max_notification_size,
 				handshake: None,
 				set_config: SetConfig {
-					// Non-authority nodes don't need to accept incoming connections on this peer set:
+					// Non-authority nodes don't need to accept incoming connections on this peer
+					// set:
 					in_peers: if is_authority == IsAuthority::Yes { 100 } else { 0 },
 					out_peers: 0,
 					reserved_nodes: Vec::new(),
diff --git a/node/network/protocol/src/request_response/incoming/mod.rs b/node/network/protocol/src/request_response/incoming/mod.rs
index e2b8ad526488..445544838672 100644
--- a/node/network/protocol/src/request_response/incoming/mod.rs
+++ b/node/network/protocol/src/request_response/incoming/mod.rs
@@ -78,8 +78,8 @@ where
 	/// reputation changes in that case.
 	///
 	/// Params:
-	///		- The raw request to decode
-	///		- Reputation changes to apply for the peer in case decoding fails.
+	/// 		- The raw request to decode
+	/// 		- Reputation changes to apply for the peer in case decoding fails.
 	fn try_from_raw(
 		raw: sc_network::config::IncomingRequest,
 		reputation_changes: Vec<UnifiedReputationChange>,
diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs
index d895a90079cc..912447c0c626 100644
--- a/node/network/protocol/src/request_response/mod.rs
+++ b/node/network/protocol/src/request_response/mod.rs
@@ -110,9 +110,9 @@ pub const MAX_PARALLEL_STATEMENT_REQUESTS: u32 = 3;
 /// Response size limit for responses of POV like data.
 ///
 /// This is larger than `MAX_POV_SIZE` to account for protocol overhead and for additional data in
-/// `CollationFetchingV1` or `AvailableDataFetchingV1` for example. We try to err on larger limits here
-/// as a too large limit only allows an attacker to waste our bandwidth some more, a too low limit
-/// might have more severe effects.
+/// `CollationFetchingV1` or `AvailableDataFetchingV1` for example. We try to err on larger limits
+/// here as a too large limit only allows an attacker to waste our bandwidth some more, a too low
+/// limit might have more severe effects.
 const POV_RESPONSE_SIZE: u64 = MAX_POV_SIZE as u64 + 10_000;
 
 /// Maximum response sizes for `StatementFetchingV1`.
diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs
index 160132011589..4cdf0d8af467 100644
--- a/node/network/statement-distribution/src/lib.rs
+++ b/node/network/statement-distribution/src/lib.rs
@@ -185,8 +185,8 @@ struct VcPerPeerTracker {
 }
 
 impl VcPerPeerTracker {
-	/// Note that the remote should now be aware that a validator has seconded a given candidate (by hash)
-	/// based on a message that we have sent it from our local pool.
+	/// Note that the remote should now be aware that a validator has seconded a given candidate (by
+	/// hash) based on a message that we have sent it from our local pool.
 	fn note_local(&mut self, h: CandidateHash) {
 		if !note_hash(&mut self.local_observed, h) {
 			gum::warn!(
@@ -198,8 +198,8 @@ impl VcPerPeerTracker {
 		}
 	}
 
-	/// Note that the remote should now be aware that a validator has seconded a given candidate (by hash)
-	/// based on a message that it has sent us.
+	/// Note that the remote should now be aware that a validator has seconded a given candidate (by
+	/// hash) based on a message that it has sent us.
 	///
 	/// Returns `true` if the peer was allowed to send us such a message, `false` otherwise.
 	fn note_remote(&mut self, h: CandidateHash) -> bool {
@@ -226,8 +226,8 @@ fn note_hash(
 /// knowledge that a peer has about goings-on in a relay parent.
 #[derive(Default)]
 struct PeerRelayParentKnowledge {
-	/// candidates that the peer is aware of because we sent statements to it. This indicates that we can
-	/// send other statements pertaining to that candidate.
+	/// candidates that the peer is aware of because we sent statements to it. This indicates that
+	/// we can send other statements pertaining to that candidate.
 	sent_candidates: HashSet<CandidateHash>,
 	/// candidates that peer is aware of, because we received statements from it.
 	received_candidates: HashSet<CandidateHash>,
@@ -321,13 +321,13 @@ impl PeerRelayParentKnowledge {
 		}
 	}
 
-	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on
-	/// a message we are receiving from the peer.
+	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based
+	/// on a message we are receiving from the peer.
 	///
 	/// Provide the maximum message count that we can receive per candidate. In practice we should
-	/// not receive more statements for any one candidate than there are members in the group assigned
-	/// to that para, but this maximum needs to be lenient to account for equivocations that may be
-	/// cross-group. As such, a maximum of 2 * `n_validators` is recommended.
+	/// not receive more statements for any one candidate than there are members in the group
+	/// assigned to that para, but this maximum needs to be lenient to account for equivocations
+	/// that may be cross-group. As such, a maximum of 2 * `n_validators` is recommended.
 	///
 	/// This returns an error if the peer should not have sent us this message according to protocol
 	/// rules for flood protection.
@@ -490,13 +490,13 @@ impl PeerData {
 		self.view_knowledge.get(relay_parent).map_or(false, |k| k.can_send(fingerprint))
 	}
 
-	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on
-	/// a message we are receiving from the peer.
+	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based
+	/// on a message we are receiving from the peer.
 	///
 	/// Provide the maximum message count that we can receive per candidate. In practice we should
-	/// not receive more statements for any one candidate than there are members in the group assigned
-	/// to that para, but this maximum needs to be lenient to account for equivocations that may be
-	/// cross-group. As such, a maximum of 2 * `n_validators` is recommended.
+	/// not receive more statements for any one candidate than there are members in the group
+	/// assigned to that para, but this maximum needs to be lenient to account for equivocations
+	/// that may be cross-group. As such, a maximum of 2 * `n_validators` is recommended.
 	///
 	/// This returns an error if the peer should not have sent us this message according to protocol
 	/// rules for flood protection.
@@ -600,8 +600,8 @@ enum NotedStatement<'a> {
 
 /// Large statement fetching status.
 enum LargeStatementStatus {
-	/// We are currently fetching the statement data from a remote peer. We keep a list of other nodes
-	/// claiming to have that data and will fallback on them.
+	/// We are currently fetching the statement data from a remote peer. We keep a list of other
+	/// nodes claiming to have that data and will fallback on them.
 	Fetching(FetchingInfo),
 	/// Statement data is fetched or we got it locally via `StatementDistributionMessage::Share`.
 	FetchedOrShared(CommittedCandidateReceipt),
@@ -712,8 +712,8 @@ impl ActiveHeadData {
 	/// to have been checked, including that the validator index is not out-of-bounds and
 	/// the signature is valid.
 	///
-	/// Any other statements or those that reference a candidate we are not aware of cannot be accepted
-	/// and will return `NotedStatement::NotUseful`.
+	/// Any other statements or those that reference a candidate we are not aware of cannot be
+	/// accepted and will return `NotedStatement::NotUseful`.
 	fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement {
 		let validator_index = statement.validator_index();
 		let comparator = StoredStatementComparator {
@@ -1272,9 +1272,9 @@ async fn retrieve_statement_from_message<'a, Context>(
 					}
 				},
 				protocol_v1::StatementDistributionMessage::Statement(_, s) => {
-					// No fetch in progress, safe to return any statement immediately (we don't bother
-					// about normal network jitter which might cause `Valid` statements to arrive early
-					// for now.).
+					// No fetch in progress, safe to return any statement immediately (we don't
+					// bother about normal network jitter which might cause `Valid` statements to
+					// arrive early for now.).
 					return Some(s)
 				},
 			}
@@ -1470,7 +1470,8 @@ async fn handle_incoming_message<'a, Context>(
 		);
 
 		match rep {
-			// This happens when a Valid statement has been received but there is no corresponding Seconded
+			// This happens when a Valid statement has been received but there is no corresponding
+			// Seconded
 			COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE => {
 				metrics.on_unexpected_statement_valid();
 				// Report peer merely if this is not a duplicate out-of-view statement that
diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/tests.rs
index 3f3e6e589616..62167f77a1e0 100644
--- a/node/network/statement-distribution/src/tests.rs
+++ b/node/network/statement-distribution/src/tests.rs
@@ -824,8 +824,8 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() {
 			})
 			.await;
 
-		// receive a seconded statement from peer A. it should be propagated onwards to peer B and to
-		// candidate backing.
+		// receive a seconded statement from peer A. it should be propagated onwards to peer B and
+		// to candidate backing.
 		let statement = {
 			let signing_context = SigningContext { parent_hash: hash_a, session_index };
 
@@ -2536,8 +2536,8 @@ fn handle_multiple_seconded_statements() {
 			})
 			.await;
 
-		// receive a seconded statement from peer A. it should be propagated onwards to peer B and to
-		// candidate backing.
+		// receive a seconded statement from peer A. it should be propagated onwards to peer B and
+		// to candidate backing.
 		let statement = {
 			let signing_context = SigningContext { parent_hash: relay_parent_hash, session_index };
 
diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs
index a2d553779fdc..ebf33d5247b1 100644
--- a/node/overseer/src/lib.rs
+++ b/node/overseer/src/lib.rs
@@ -211,10 +211,10 @@ impl Handle {
 
 	/// Wait for a block with the given hash to be in the active-leaves set.
 	///
-	/// The response channel responds if the hash was activated and is closed if the hash was deactivated.
-	/// Note that due the fact the overseer doesn't store the whole active-leaves set, only deltas,
-	/// the response channel may never return if the hash was deactivated before this call.
-	/// In this case, it's the caller's responsibility to ensure a timeout is set.
+	/// The response channel responds if the hash was activated and is closed if the hash was
+	/// deactivated. Note that due to the fact the overseer doesn't store the whole active-leaves
+	/// set, only deltas, the response channel may never return if the hash was deactivated before
+	/// this call. In this case, it's the caller's responsibility to ensure a timeout is set.
 	pub async fn wait_for_activation(
 		&mut self,
 		hash: Hash,
@@ -355,7 +355,6 @@ pub async fn forward_events<P: BlockchainEvents<Block>>(client: Arc<P>, mut hand
 ///                         +-----------+
 ///                         |           |
 ///                         +-----------+
-///
 /// ```
 ///
 /// [`Subsystem`]: trait.Subsystem.html
@@ -363,8 +362,8 @@ pub async fn forward_events<P: BlockchainEvents<Block>>(client: Arc<P>, mut hand
 /// # Example
 ///
 /// The [`Subsystems`] may be any type as long as they implement an expected interface.
-/// Here, we create a mock validation subsystem and a few dummy ones and start the `Overseer` with them.
-/// For the sake of simplicity the termination of the example is done with a timeout.
+/// Here, we create a mock validation subsystem and a few dummy ones and start the `Overseer` with
+/// them. For the sake of simplicity the termination of the example is done with a timeout.
 /// ```
 /// # use std::time::Duration;
 /// # use futures::{executor, pin_mut, select, FutureExt};
@@ -394,11 +393,11 @@ pub async fn forward_events<P: BlockchainEvents<Block>>(client: Arc<P>, mut hand
 /// impl<Ctx> overseer::Subsystem<Ctx, SubsystemError> for ValidationSubsystem
 /// where
 ///     Ctx: overseer::SubsystemContext<
-///				Message=CandidateValidationMessage,
-///				AllMessages=AllMessages,
-///				Signal=OverseerSignal,
-///				Error=SubsystemError,
-///			>,
+/// 				Message=CandidateValidationMessage,
+/// 				AllMessages=AllMessages,
+/// 				Signal=OverseerSignal,
+/// 				Error=SubsystemError,
+/// 			>,
 /// {
 ///     fn start(
 ///         self,
@@ -426,10 +425,10 @@ pub async fn forward_events<P: BlockchainEvents<Block>>(client: Arc<P>, mut hand
 ///
 /// let spawner = sp_core::testing::TaskExecutor::new();
 /// let (overseer, _handle) = dummy_overseer_builder(spawner, AlwaysSupportsParachains, None)
-///		.unwrap()
-///		.replace_candidate_validation(|_| ValidationSubsystem)
-///		.build()
-///		.unwrap();
+/// 		.unwrap()
+/// 		.replace_candidate_validation(|_| ValidationSubsystem)
+/// 		.build()
+/// 		.unwrap();
 ///
 /// let timer = Delay::new(Duration::from_millis(50)).fuse();
 ///
@@ -825,7 +824,8 @@ where
 
 		// If there are no leaves being deactivated, we don't need to send an update.
 		//
-		// Our peers will be informed about our finalized block the next time we activating/deactivating some leaf.
+		// Our peers will be informed about our finalized block the next time we
+		// activate/deactivate some leaf.
 		if !update.is_empty() {
 			self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await?;
 		}
diff --git a/node/primitives/src/disputes/message.rs b/node/primitives/src/disputes/message.rs
index 992d70ba1324..89d3ea6c0af9 100644
--- a/node/primitives/src/disputes/message.rs
+++ b/node/primitives/src/disputes/message.rs
@@ -105,8 +105,8 @@ impl DisputeMessage {
 	/// - the invalid statement is indeed an invalid one
 	/// - the valid statement is indeed a valid one
 	/// - The passed `CandidateReceipt` has the correct hash (as signed in the statements).
-	/// - the given validator indices match with the given `ValidatorId`s in the statements,
-	///   given a `SessionInfo`.
+	/// - the given validator indices match with the given `ValidatorId`s in the statements, given a
+	///   `SessionInfo`.
 	///
 	/// We don't check whether the given `SessionInfo` matches the `SessionIndex` in the
 	/// statements, because we can't without doing a runtime query. Nevertheless this smart
diff --git a/node/primitives/src/disputes/status.rs b/node/primitives/src/disputes/status.rs
index 309225edc94b..d93c3ec846ce 100644
--- a/node/primitives/src/disputes/status.rs
+++ b/node/primitives/src/disputes/status.rs
@@ -16,7 +16,8 @@
 
 use parity_scale_codec::{Decode, Encode};
 
-/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots.
+/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS
+/// reboots.
 pub type Timestamp = u64;
 
 /// The status of dispute.
@@ -88,8 +89,8 @@ impl DisputeStatus {
 		}
 	}
 
-	/// Transition the status to a new status after observing the dispute has concluded for the candidate.
-	/// This may be a no-op if the status was already concluded.
+	/// Transition the status to a new status after observing the dispute has concluded for the
+	/// candidate. This may be a no-op if the status was already concluded.
 	pub fn conclude_for(self, now: Timestamp) -> DisputeStatus {
 		match self {
 			DisputeStatus::Active | DisputeStatus::Confirmed => DisputeStatus::ConcludedFor(now),
@@ -98,8 +99,8 @@ impl DisputeStatus {
 		}
 	}
 
-	/// Transition the status to a new status after observing the dispute has concluded against the candidate.
-	/// This may be a no-op if the status was already concluded.
+	/// Transition the status to a new status after observing the dispute has concluded against the
+	/// candidate. This may be a no-op if the status was already concluded.
 	pub fn conclude_against(self, now: Timestamp) -> DisputeStatus {
 		match self {
 			DisputeStatus::Active | DisputeStatus::Confirmed =>
diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs
index 1177dbc17caa..d49cd806d54e 100644
--- a/node/primitives/src/lib.rs
+++ b/node/primitives/src/lib.rs
@@ -180,8 +180,8 @@ impl std::fmt::Debug for Statement {
 impl Statement {
 	/// Get the candidate hash referenced by this statement.
 	///
-	/// If this is a `Statement::Seconded`, this does hash the candidate receipt, which may be expensive
-	/// for large candidates.
+	/// If this is a `Statement::Seconded`, this does hash the candidate receipt, which may be
+	/// expensive for large candidates.
 	pub fn candidate_hash(&self) -> CandidateHash {
 		match *self {
 			Statement::Valid(ref h) => *h,
@@ -215,8 +215,8 @@ impl EncodeAs<CompactStatement> for Statement {
 ///
 /// Signing context and validator set should be apparent from context.
 ///
-/// This statement is "full" in the sense that the `Seconded` variant includes the candidate receipt.
-/// Only the compact `SignedStatement` is suitable for submission to the chain.
+/// This statement is "full" in the sense that the `Seconded` variant includes the candidate
+/// receipt. Only the compact `SignedStatement` is suitable for submission to the chain.
 pub type SignedFullStatement = Signed<Statement, CompactStatement>;
 
 /// Variant of `SignedFullStatement` where the signature has not yet been verified.
@@ -256,8 +256,8 @@ pub enum InvalidCandidate {
 /// Result of the validation of the candidate.
 #[derive(Debug)]
 pub enum ValidationResult {
-	/// Candidate is valid. The validation process yields these outputs and the persisted validation
-	/// data used to form inputs.
+	/// Candidate is valid. The validation process yields these outputs and the persisted
+	/// validation data used to form inputs.
 	Valid(CandidateCommitments, PersistedValidationData),
 	/// Candidate is invalid.
 	Invalid(InvalidCandidate),
@@ -321,7 +321,8 @@ pub struct Collation<BlockNumber = polkadot_primitives::BlockNumber> {
 	pub proof_of_validity: MaybeCompressedPoV,
 	/// The number of messages processed from the DMQ.
 	pub processed_downward_messages: u32,
-	/// The mark which specifies the block number up to which all inbound HRMP messages are processed.
+	/// The mark which specifies the block number up to which all inbound HRMP messages are
+	/// processed.
 	pub hrmp_watermark: BlockNumber,
 }
 
@@ -344,9 +345,9 @@ pub struct CollationResult {
 	pub collation: Collation,
 	/// An optional result sender that should be informed about a successfully seconded collation.
 	///
-	/// There is no guarantee that this sender is informed ever about any result, it is completely okay to just drop it.
-	/// However, if it is called, it should be called with the signed statement of a parachain validator seconding the
-	/// collation.
+	/// There is no guarantee that this sender is informed ever about any result, it is completely
+	/// okay to just drop it. However, if it is called, it should be called with the signed
+	/// statement of a parachain validator seconding the collation.
 	pub result_sender: Option<futures::channel::oneshot::Sender<CollationSecondedSignal>>,
 }
 
@@ -362,8 +363,9 @@ impl CollationResult {
 
 /// Collation function.
 ///
-/// Will be called with the hash of the relay chain block the parachain block should be build on and the
-/// [`ValidationData`] that provides information about the state of the parachain on the relay chain.
+/// Will be called with the hash of the relay chain block the parachain block should be built on and
+/// the [`ValidationData`] that provides information about the state of the parachain on the relay
+/// chain.
 ///
 /// Returns an optional [`CollationResult`].
 #[cfg(not(target_os = "unknown"))]
diff --git a/node/service/src/chain_spec.rs b/node/service/src/chain_spec.rs
index a9e6b45f3b2d..7aabfa6e9185 100644
--- a/node/service/src/chain_spec.rs
+++ b/node/service/src/chain_spec.rs
@@ -529,11 +529,12 @@ fn kusama_staging_testnet_config_genesis(wasm_binary: &[u8]) -> kusama::RuntimeG
 		hex!["12b782529c22032ed4694e0f6e7d486be7daa6d12088f6bc74d593b3900b8438"].into(),
 	];
 
-	// for i in 1 2 3 4; do for j in stash controller; do subkey inspect "$SECRET//$i//$j"; done; done
-	// for i in 1 2 3 4; do for j in babe; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done
-	// for i in 1 2 3 4; do for j in grandpa; do subkey --ed25519 inspect "$SECRET//$i//$j"; done; done
-	// for i in 1 2 3 4; do for j in im_online; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done
-	// for i in 1 2 3 4; do for j in para_validator para_assignment; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done
+	// for i in 1 2 3 4; do for j in stash controller; do subkey inspect "$SECRET//$i//$j"; done;
+	// done for i in 1 2 3 4; do for j in babe; do subkey --sr25519 inspect "$SECRET//$i//$j"; done;
+	// done for i in 1 2 3 4; do for j in grandpa; do subkey --ed25519 inspect "$SECRET//$i//$j";
+	// done; done for i in 1 2 3 4; do for j in im_online; do subkey --sr25519 inspect
+	// "$SECRET//$i//$j"; done; done for i in 1 2 3 4; do for j in para_validator para_assignment;
+	// do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done
 	let initial_authorities: Vec<(
 		AccountId,
 		AccountId,
diff --git a/node/service/src/fake_runtime_api.rs b/node/service/src/fake_runtime_api.rs
index b322114cbb75..d9553afa024b 100644
--- a/node/service/src/fake_runtime_api.rs
+++ b/node/service/src/fake_runtime_api.rs
@@ -16,7 +16,8 @@
 
 //! Provides "fake" runtime API implementations
 //!
-//! These are used to provide a type that implements these runtime APIs without requiring to import the native runtimes.
+//! These are used to provide a type that implements these runtime APIs without requiring to import
+//! the native runtimes.
 
 use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature};
 use grandpa_primitives::AuthorityId as GrandpaId;
diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs
index fa8cb8ec77f7..4dda57110825 100644
--- a/node/service/src/lib.rs
+++ b/node/service/src/lib.rs
@@ -696,9 +696,10 @@ pub const AVAILABILITY_CONFIG: AvailabilityConfig = AvailabilityConfig {
 /// This is an advanced feature and not recommended for general use. Generally, `build_full` is
 /// a better choice.
 ///
-/// `overseer_enable_anyways` always enables the overseer, based on the provided `OverseerGenerator`,
-/// regardless of the role the node has. The relay chain selection (longest or disputes-aware) is
-/// still determined based on the role of the node. Likewise for authority discovery.
+/// `overseer_enable_anyways` always enables the overseer, based on the provided
+/// `OverseerGenerator`, regardless of the role the node has. The relay chain selection (longest or
+/// disputes-aware) is still determined based on the role of the node. Likewise for authority
+/// discovery.
 ///
 /// `workers_path` is used to get the path to the directory where auxiliary worker binaries reside.
 /// If not specified, the main binary's directory is searched first, then `/usr/lib/polkadot` is
@@ -1331,9 +1332,10 @@ pub fn new_chain_ops(
 /// The actual "flavor", aka if it will use `Polkadot`, `Rococo` or `Kusama` is determined based on
 /// [`IdentifyVariant`] using the chain spec.
 ///
-/// `overseer_enable_anyways` always enables the overseer, based on the provided `OverseerGenerator`,
-/// regardless of the role the node has. The relay chain selection (longest or disputes-aware) is
-/// still determined based on the role of the node. Likewise for authority discovery.
+/// `overseer_enable_anyways` always enables the overseer, based on the provided
+/// `OverseerGenerator`, regardless of the role the node has. The relay chain selection (longest or
+/// disputes-aware) is still determined based on the role of the node. Likewise for authority
+/// discovery.
 #[cfg(feature = "full-node")]
 pub fn build_full<OverseerGenerator: OverseerGen>(
 	config: Configuration,
diff --git a/node/service/src/relay_chain_selection.rs b/node/service/src/relay_chain_selection.rs
index afc0ce320610..189073783f0d 100644
--- a/node/service/src/relay_chain_selection.rs
+++ b/node/service/src/relay_chain_selection.rs
@@ -472,8 +472,8 @@ where
 		let lag = initial_leaf_number.saturating_sub(subchain_number);
 		self.metrics.note_approval_checking_finality_lag(lag);
 
-		// Messages sent to `approval-distrbution` are known to have high `ToF`, we need to spawn a task for sending
-		// the message to not block here and delay finality.
+		// Messages sent to `approval-distribution` are known to have high `ToF`, we need to spawn a
+		// task for sending the message to not block here and delay finality.
 		if let Some(spawn_handle) = &self.spawn_handle {
 			let mut overseer_handle = self.overseer.clone();
 			let lag_update_task = async move {
@@ -537,9 +537,10 @@ where
 							error = ?e,
 							"Call to `DetermineUndisputedChain` failed",
 						);
-						// We need to return a sane finality target. But, we are unable to ensure we are not
-						// finalizing something that is being disputed or has been concluded as invalid. We will be
-						// conservative here and not vote for finality above the ancestor passed in.
+						// We need to return a sane finality target. But, we are unable to ensure we
+						// are not finalizing something that is being disputed or has been concluded
+						// as invalid. We will be conservative here and not vote for finality above
+						// the ancestor passed in.
 						return Ok(target_hash)
 					},
 				};
diff --git a/node/service/src/tests.rs b/node/service/src/tests.rs
index 424af4d22a26..95d5765bad45 100644
--- a/node/service/src/tests.rs
+++ b/node/service/src/tests.rs
@@ -498,8 +498,8 @@ struct CaseVars {
 
 /// ```raw
 /// genesis -- 0xA1 --- 0xA2 --- 0xA3 --- 0xA4(!avail) --- 0xA5(!avail)
-///			   \
-///				`- 0xB2
+/// 			   \
+/// 				`- 0xB2
 /// ```
 fn chain_undisputed() -> CaseVars {
 	let head: Hash = ChainBuilder::GENESIS_HASH;
@@ -529,8 +529,8 @@ fn chain_undisputed() -> CaseVars {
 
 /// ```raw
 /// genesis -- 0xA1 --- 0xA2 --- 0xA3(disputed) --- 0xA4(!avail) --- 0xA5(!avail)
-///			   \
-///				`- 0xB2
+/// 			   \
+/// 				`- 0xB2
 /// ```
 fn chain_0() -> CaseVars {
 	let head: Hash = ChainBuilder::GENESIS_HASH;
@@ -560,8 +560,8 @@ fn chain_0() -> CaseVars {
 
 /// ```raw
 /// genesis -- 0xA1 --- 0xA2(disputed) --- 0xA3
-///			   \
-///				`- 0xB2 --- 0xB3(!available)
+/// 			   \
+/// 				`- 0xB2 --- 0xB3(!available)
 /// ```
 fn chain_1() -> CaseVars {
 	let head: Hash = ChainBuilder::GENESIS_HASH;
@@ -588,8 +588,8 @@ fn chain_1() -> CaseVars {
 
 /// ```raw
 /// genesis -- 0xA1 --- 0xA2(disputed) --- 0xA3
-///			   \
-///				`- 0xB2 --- 0xB3
+/// 			   \
+/// 				`- 0xB2 --- 0xB3
 /// ```
 fn chain_2() -> CaseVars {
 	let head: Hash = ChainBuilder::GENESIS_HASH;
@@ -616,8 +616,8 @@ fn chain_2() -> CaseVars {
 
 /// ```raw
 /// genesis -- 0xA1 --- 0xA2 --- 0xA3(disputed)
-///			   \
-///				`- 0xB2 --- 0xB3
+/// 			   \
+/// 				`- 0xB2 --- 0xB3
 /// ```
 fn chain_3() -> CaseVars {
 	let head: Hash = ChainBuilder::GENESIS_HASH;
@@ -644,10 +644,10 @@ fn chain_3() -> CaseVars {
 
 /// ```raw
 /// genesis -- 0xA1 --- 0xA2 --- 0xA3(disputed)
-///			   \
-///				`- 0xB2 --- 0xB3
+/// 			   \
+/// 				`- 0xB2 --- 0xB3
 ///
-///	            ? --- NEX(does_not_exist)
+/// 	            ? --- NEX(does_not_exist)
 /// ```
 fn chain_4() -> CaseVars {
 	let head: Hash = ChainBuilder::GENESIS_HASH;
diff --git a/node/subsystem-test-helpers/src/lib.rs b/node/subsystem-test-helpers/src/lib.rs
index 4170f22c5b86..fb908278aa7d 100644
--- a/node/subsystem-test-helpers/src/lib.rs
+++ b/node/subsystem-test-helpers/src/lib.rs
@@ -310,7 +310,8 @@ pub fn make_buffered_subsystem_context<M, S>(
 
 /// Test a subsystem, mocking the overseer
 ///
-/// Pass in two async closures: one mocks the overseer, the other runs the test from the perspective of a subsystem.
+/// Pass in two async closures: one mocks the overseer, the other runs the test from the perspective
+/// of a subsystem.
 ///
 /// Times out in 5 seconds.
 pub fn subsystem_test_harness<M, OverseerFactory, Overseer, TestFactory, Test>(
diff --git a/node/subsystem-types/src/lib.rs b/node/subsystem-types/src/lib.rs
index 88c7165bcd80..f438a09592c1 100644
--- a/node/subsystem-types/src/lib.rs
+++ b/node/subsystem-types/src/lib.rs
@@ -82,8 +82,8 @@ pub struct ActivatedLeaf {
 	pub status: LeafStatus,
 	/// An associated [`jaeger::Span`].
 	///
-	/// NOTE: Each span should only be kept active as long as the leaf is considered active and should be dropped
-	/// when the leaf is deactivated.
+	/// NOTE: Each span should only be kept active as long as the leaf is considered active and
+	/// should be dropped when the leaf is deactivated.
 	pub span: Arc<jaeger::Span>,
 }
 
diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs
index 8f2e3375b6f1..d5dcea7a2565 100644
--- a/node/subsystem-types/src/messages.rs
+++ b/node/subsystem-types/src/messages.rs
@@ -16,8 +16,8 @@
 
 //! Message types for the overseer and subsystems.
 //!
-//! These messages are intended to define the protocol by which different subsystems communicate with each
-//! other and signals that they receive from an overseer to coordinate their work.
+//! These messages are intended to define the protocol by which different subsystems communicate
+//! with each other and signals that they receive from an overseer to coordinate their work.
 //! This is intended for use with the `polkadot-overseer` crate.
 //!
 //! Subsystems' APIs are defined separately from their implementation, leading to easier mocking.
@@ -62,12 +62,13 @@ pub enum CandidateBackingMessage {
 	/// Requests a set of backable candidates that could be backed in a child of the given
 	/// relay-parent, referenced by its hash.
 	GetBackedCandidates(Hash, Vec<CandidateHash>, oneshot::Sender<Vec<BackedCandidate>>),
-	/// Note that the Candidate Backing subsystem should second the given candidate in the context of the
-	/// given relay-parent (ref. by hash). This candidate must be validated.
+	/// Note that the Candidate Backing subsystem should second the given candidate in the context
+	/// of the given relay-parent (ref. by hash). This candidate must be validated.
 	Second(Hash, CandidateReceipt, PoV),
-	/// Note a validator's statement about a particular candidate. Disagreements about validity must be escalated
-	/// to a broader check by the Disputes Subsystem, though that escalation is deferred until the approval voting
-	/// stage to guarantee availability. Agreements are simply tallied until a quorum is reached.
+	/// Note a validator's statement about a particular candidate. Disagreements about validity
+	/// must be escalated to a broader check by the Disputes Subsystem, though that escalation is
+	/// deferred until the approval voting stage to guarantee availability. Agreements are simply
+	/// tallied until a quorum is reached.
 	Statement(Hash, SignedFullStatement),
 }
 
@@ -143,8 +144,8 @@ pub enum CandidateValidationMessage {
 	/// Try to compile the given validation code and send back
 	/// the outcome.
 	///
-	/// The validation code is specified by the hash and will be queried from the runtime API at the
-	/// given relay-parent.
+	/// The validation code is specified by the hash and will be queried from the runtime API at
+	/// the given relay-parent.
 	PreCheck(
 		// Relay-parent
 		Hash,
@@ -157,16 +158,16 @@ pub enum CandidateValidationMessage {
 #[derive(Debug, derive_more::From)]
 pub enum CollatorProtocolMessage {
 	/// Signal to the collator protocol that it should connect to validators with the expectation
-	/// of collating on the given para. This is only expected to be called once, early on, if at all,
-	/// and only by the Collation Generation subsystem. As such, it will overwrite the value of
-	/// the previous signal.
+	/// of collating on the given para. This is only expected to be called once, early on, if at
+	/// all, and only by the Collation Generation subsystem. As such, it will overwrite the value
+	/// of the previous signal.
 	///
 	/// This should be sent before any `DistributeCollation` message.
 	CollateOn(ParaId),
 	/// Provide a collation to distribute to validators with an optional result sender.
 	///
-	/// The result sender should be informed when at least one parachain validator seconded the collation. It is also
-	/// completely okay to just drop the sender.
+	/// The result sender should be informed when at least one parachain validator seconded the
+	/// collation. It is also completely okay to just drop the sender.
 	DistributeCollation(CandidateReceipt, PoV, Option<oneshot::Sender<CollationSecondedSignal>>),
 	/// Report a collator as having provided an invalid collation. This should lead to disconnect
 	/// and blacklist of the collator.
@@ -174,7 +175,8 @@ pub enum CollatorProtocolMessage {
 	/// Get a network bridge update.
 	#[from]
 	NetworkBridgeUpdate(NetworkBridgeEvent<net_protocol::CollatorProtocolMessage>),
-	/// We recommended a particular candidate to be seconded, but it was invalid; penalize the collator.
+	/// We recommended a particular candidate to be seconded, but it was invalid; penalize the
+	/// collator.
 	///
 	/// The hash is the relay parent.
 	Invalid(Hash, CandidateReceipt),
@@ -198,14 +200,15 @@ impl Default for CollatorProtocolMessage {
 pub enum DisputeCoordinatorMessage {
 	/// Import statements by validators about a candidate.
 	///
-	/// The subsystem will silently discard ancient statements or sets of only dispute-specific statements for
-	/// candidates that are previously unknown to the subsystem. The former is simply because ancient
-	/// data is not relevant and the latter is as a DoS prevention mechanism. Both backing and approval
-	/// statements already undergo anti-DoS procedures in their respective subsystems, but statements
-	/// cast specifically for disputes are not necessarily relevant to any candidate the system is
-	/// already aware of and thus present a DoS vector. Our expectation is that nodes will notify each
-	/// other of disputes over the network by providing (at least) 2 conflicting statements, of which one is either
-	/// a backing or validation statement.
+	/// The subsystem will silently discard ancient statements or sets of only dispute-specific
+	/// statements for candidates that are previously unknown to the subsystem. The former is
+	/// simply because ancient data is not relevant and the latter is as a DoS prevention
+	/// mechanism. Both backing and approval statements already undergo anti-DoS procedures in
+	/// their respective subsystems, but statements cast specifically for disputes are not
+	/// necessarily relevant to any candidate the system is already aware of and thus present a DoS
+	/// vector. Our expectation is that nodes will notify each other of disputes over the network
+	/// by providing (at least) 2 conflicting statements, of which one is either a backing or
+	/// validation statement.
 	///
 	/// This does not do any checking of the message signature.
 	ImportStatements {
@@ -222,16 +225,16 @@ pub enum DisputeCoordinatorMessage {
 		///
 		/// This is:
 		/// - we discarded the votes because
-		///		- they were ancient or otherwise invalid (result: `InvalidImport`)
-		///		- or we were not able to recover availability for an unknown candidate (result:
+		/// 		- they were ancient or otherwise invalid (result: `InvalidImport`)
+		/// 		- or we were not able to recover availability for an unknown candidate (result:
 		///		`InvalidImport`)
-		///		- or were known already (in that case the result will still be `ValidImport`)
+		/// 		- or were known already (in that case the result will still be `ValidImport`)
 		/// - or we recorded them because (`ValidImport`)
-		///		- we cast our own vote already on that dispute
-		///		- or we have approval votes on that candidate
-		///		- or other explicit votes on that candidate already recorded
-		///		- or recovered availability for the candidate
-		///		- or the imported statements are backing/approval votes, which are always accepted.
+		/// 		- we cast our own vote already on that dispute
+		/// 		- or we have approval votes on that candidate
+		/// 		- or other explicit votes on that candidate already recorded
+		/// 		- or recovered availability for the candidate
+		/// 		- or the imported statements are backing/approval votes, which are always accepted.
 		pending_confirmation: Option<oneshot::Sender<ImportStatementsResult>>,
 	},
 	/// Fetch a list of all recent disputes the coordinator is aware of.
@@ -246,15 +249,17 @@ pub enum DisputeCoordinatorMessage {
 		Vec<(SessionIndex, CandidateHash)>,
 		oneshot::Sender<Vec<(SessionIndex, CandidateHash, CandidateVotes)>>,
 	),
-	/// Sign and issue local dispute votes. A value of `true` indicates validity, and `false` invalidity.
+	/// Sign and issue local dispute votes. A value of `true` indicates validity, and `false`
+	/// invalidity.
 	IssueLocalStatement(SessionIndex, CandidateHash, CandidateReceipt, bool),
 	/// Determine the highest undisputed block within the given chain, based on where candidates
 	/// were included. If even the base block should not be finalized due to a dispute,
 	/// then `None` should be returned on the channel.
 	///
-	/// The block descriptions begin counting upwards from the block after the given `base_number`. The `base_number`
-	/// is typically the number of the last finalized block but may be slightly higher. This block
-	/// is inevitably going to be finalized so it is not accounted for by this function.
+	/// The block descriptions begin counting upwards from the block after the given `base_number`.
+	/// The `base_number` is typically the number of the last finalized block but may be slightly
+	/// higher. This block is inevitably going to be finalized so it is not accounted for by this
+	/// function.
 	DetermineUndisputedChain {
 		/// The lowest possible block to vote on.
 		base: (BlockNumber, Hash),
@@ -369,8 +374,8 @@ pub enum NetworkBridgeTxMessage {
 		/// authority discovery has failed to resolve.
 		failed: oneshot::Sender<usize>,
 	},
-	/// Alternative to `ConnectToValidators` in case you already know the `Multiaddrs` you want to be
-	/// connected to.
+	/// Alternative to `ConnectToValidators` in case you already know the `Multiaddrs` you want to
+	/// be connected to.
 	ConnectToResolvedValidators {
 		/// Each entry corresponds to the addresses of an already resolved validator.
 		validator_addrs: Vec<HashSet<Multiaddr>>,
@@ -576,8 +581,8 @@ pub enum RuntimeApiRequest {
 		OccupiedCoreAssumption,
 		RuntimeApiSender<Option<PersistedValidationData>>,
 	),
-	/// Get the persisted validation data for a particular para along with the current validation code
-	/// hash, matching the data hash against an expected one.
+	/// Get the persisted validation data for a particular para along with the current validation
+	/// code hash, matching the data hash against an expected one.
 	AssumedValidationData(
 		ParaId,
 		Hash,
@@ -595,10 +600,11 @@ pub enum RuntimeApiRequest {
 	/// will inform on how the validation data should be computed if the para currently
 	/// occupies a core.
 	ValidationCode(ParaId, OccupiedCoreAssumption, RuntimeApiSender<Option<ValidationCode>>),
-	/// Get validation code by its hash, either past, current or future code can be returned, as long as state is still
-	/// available.
+	/// Get validation code by its hash, either past, current or future code can be returned, as
+	/// long as state is still available.
 	ValidationCodeByHash(ValidationCodeHash, RuntimeApiSender<Option<ValidationCode>>),
-	/// Get a the candidate pending availability for a particular parachain by parachain / core index
+	/// Get the candidate pending availability for a particular parachain by parachain / core
+	/// index
 	CandidatePendingAvailability(ParaId, RuntimeApiSender<Option<CommittedCandidateReceipt>>),
 	/// Get all events concerning candidates (backing, inclusion, time-out) in the parent of
 	/// the block in whose state this request is executed.
@@ -623,8 +629,9 @@ pub enum RuntimeApiRequest {
 	SubmitPvfCheckStatement(PvfCheckStatement, ValidatorSignature, RuntimeApiSender<()>),
 	/// Returns code hashes of PVFs that require pre-checking by validators in the active set.
 	PvfsRequirePrecheck(RuntimeApiSender<Vec<ValidationCodeHash>>),
-	/// Get the validation code used by the specified para, taking the given `OccupiedCoreAssumption`, which
-	/// will inform on how the validation data should be computed if the para currently occupies a core.
+	/// Get the validation code used by the specified para, taking the given
+	/// `OccupiedCoreAssumption`, which will inform on how the validation data should be computed
+	/// if the para currently occupies a core.
 	ValidationCodeHash(
 		ParaId,
 		OccupiedCoreAssumption,
@@ -686,13 +693,15 @@ pub enum StatementDistributionMessage {
 	NetworkBridgeUpdate(NetworkBridgeEvent<net_protocol::StatementDistributionMessage>),
 }
 
-/// This data becomes intrinsics or extrinsics which should be included in a future relay chain block.
+/// This data becomes intrinsics or extrinsics which should be included in a future relay chain
+/// block.
 // It needs to be cloneable because multiple potential block authors can request copies.
 #[derive(Debug, Clone)]
 pub enum ProvisionableData {
 	/// This bitfield indicates the availability of various candidate blocks.
 	Bitfield(Hash, SignedAvailabilityBitfield),
-	/// The Candidate Backing subsystem believes that this candidate is valid, pending availability.
+	/// The Candidate Backing subsystem believes that this candidate is valid, pending
+	/// availability.
 	BackedCandidate(CandidateReceipt),
 	/// Misbehavior reports are self-contained proofs of validator misbehavior.
 	MisbehaviorReport(Hash, ValidatorIndex, Misbehavior),
@@ -716,11 +725,11 @@ pub struct ProvisionerInherentData {
 /// In all cases, the Hash is that of the relay parent.
 #[derive(Debug)]
 pub enum ProvisionerMessage {
-	/// This message allows external subsystems to request the set of bitfields and backed candidates
-	/// associated with a particular potential block hash.
+	/// This message allows external subsystems to request the set of bitfields and backed
+	/// candidates associated with a particular potential block hash.
 	///
-	/// This is expected to be used by a proposer, to inject that information into the `InherentData`
-	/// where it can be assembled into the `ParaInherent`.
+	/// This is expected to be used by a proposer, to inject that information into the
+	/// `InherentData` where it can be assembled into the `ParaInherent`.
 	RequestInherentData(Hash, oneshot::Sender<ProvisionerInherentData>),
 	/// This data should become part of a relay chain block
 	ProvisionableData(Hash, ProvisionableData),
diff --git a/node/subsystem-types/src/runtime_client.rs b/node/subsystem-types/src/runtime_client.rs
index 196b928ad62b..4d8eddde73e9 100644
--- a/node/subsystem-types/src/runtime_client.rs
+++ b/node/subsystem-types/src/runtime_client.rs
@@ -138,7 +138,7 @@ pub trait RuntimeApiSubsystemClient {
 	async fn on_chain_votes(&self, at: Hash)
 		-> Result<Option<ScrapedOnChainVotes<Hash>>, ApiError>;
 
-	/***** Added in v2 *****/
+	/***** Added in v2 **** */
 
 	/// Get the session info for the given session, if stored.
 	///
@@ -164,7 +164,8 @@ pub trait RuntimeApiSubsystemClient {
 	/// NOTE: This function is only available since parachain host version 2.
 	async fn pvfs_require_precheck(&self, at: Hash) -> Result<Vec<ValidationCodeHash>, ApiError>;
 
-	/// Fetch the hash of the validation code used by a para, making the given `OccupiedCoreAssumption`.
+	/// Fetch the hash of the validation code used by a para, making the given
+	/// `OccupiedCoreAssumption`.
 	///
 	/// NOTE: This function is only available since parachain host version 2.
 	async fn validation_code_hash(
@@ -174,7 +175,7 @@ pub trait RuntimeApiSubsystemClient {
 		assumption: OccupiedCoreAssumption,
 	) -> Result<Option<ValidationCodeHash>, ApiError>;
 
-	/***** Added in v3 *****/
+	/***** Added in v3 **** */
 
 	/// Returns all onchain disputes.
 	/// This is a staging method! Do not use on production runtimes!
diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs
index de869bd91f12..e0b81608ff2f 100644
--- a/node/subsystem-util/src/lib.rs
+++ b/node/subsystem-util/src/lib.rs
@@ -20,7 +20,8 @@
 //! or determining what their validator ID is. These common interests are factored into
 //! this module.
 //!
-//! This crate also reexports Prometheus metric types which are expected to be implemented by subsystems.
+//! This crate also reexports Prometheus metric types which are expected to be implemented by
+//! subsystems.
 
 #![warn(missing_docs)]
 
@@ -60,7 +61,8 @@ pub use polkadot_node_network_protocol::MIN_GOSSIP_PEERS;
 
 pub use determine_new_blocks::determine_new_blocks;
 
-/// These reexports are required so that external crates can use the `delegated_subsystem` macro properly.
+/// These reexports are required so that external crates can use the `delegated_subsystem` macro
+/// properly.
 pub mod reexports {
 	pub use polkadot_overseer::gen::{SpawnedSubsystem, Spawner, Subsystem, SubsystemContext};
 }
@@ -367,7 +369,8 @@ pub struct Validator {
 }
 
 impl Validator {
-	/// Get a struct representing this node's validator if this node is in fact a validator in the context of the given block.
+	/// Get a struct representing this node's validator if this node is in fact a validator in the
+	/// context of the given block.
 	pub async fn new<S>(parent: Hash, keystore: KeystorePtr, sender: &mut S) -> Result<Self, Error>
 	where
 		S: SubsystemSender<RuntimeApiMessage>,
diff --git a/node/subsystem-util/src/nesting_sender.rs b/node/subsystem-util/src/nesting_sender.rs
index 4417efbefb04..5d80dbf78101 100644
--- a/node/subsystem-util/src/nesting_sender.rs
+++ b/node/subsystem-util/src/nesting_sender.rs
@@ -33,14 +33,14 @@
 //!
 //! This module helps with this in part. It does not break the multithreaded by default approach,
 //! but it breaks the `spawn everything` approach. So once you `spawn` you will still be
-//! multithreaded by default, despite that for most tasks we spawn (which just wait for network or some
-//! message to arrive), that is very much pointless and needless overhead. You will just spawn less in
-//! the first place.
+//! multithreaded by default, despite that for most tasks we spawn (which just wait for network or
+//! some message to arrive), that is very much pointless and needless overhead. You will just spawn
+//! less in the first place.
 //!
 //! By default your code is single threaded, except when actually needed:
-//!		- need to wait for long running synchronous IO (a threaded runtime is actually useful here)
-//!		- need to wait for some async event (message to arrive)
-//!		- need to do some hefty CPU bound processing (a thread is required here as well)
+//! 		- need to wait for long running synchronous IO (a threaded runtime is actually useful here)
+//! 		- need to wait for some async event (message to arrive)
+//! 		- need to do some hefty CPU bound processing (a thread is required here as well)
 //!
 //! and it is not acceptable to block the main task for waiting for the result, because we actually
 //! really have other things to do or at least need to stay responsive just in case.
@@ -48,7 +48,8 @@
 //! With the types and traits in this module you can achieve exactly that: You write modules which
 //! just execute logic and can call into the functions of other modules - yes we are calling normal
 //! functions. For the case a module you are calling into requires an occasional background task,
-//! you provide it with a `NestingSender<M, ChildModuleMessage>` that it can pass to any spawned tasks.
+//! you provide it with a `NestingSender<M, ChildModuleMessage>` that it can pass to any spawned
+//! tasks.
 //!
 //! This way you don't have to spawn a task for each module just for it to be able to handle
 //! asynchronous events. The module relies on the using/enclosing code/module to forward it any
@@ -65,9 +66,9 @@
 //! Because the wrapping is optional and transparent to the lower modules, each module can also be
 //! used at the top directly without any wrapping, e.g. for standalone use or for testing purposes.
 //!
-//! Checkout the documentation of [`NestingSender`][nesting_sender::NestingSender] below for a basic usage example. For a real
-//! world usage I would like to point you to the dispute-distribution subsystem which makes use of
-//! this architecture.
+//! Check out the documentation of [`NestingSender`][nesting_sender::NestingSender] below for a basic
+//! usage example. For a real world usage I would like to point you to the dispute-distribution
+//! subsystem which makes use of this architecture.
 //!
 //! ## Limitations
 //!
diff --git a/node/subsystem-util/src/reputation.rs b/node/subsystem-util/src/reputation.rs
index 09c00bb4688a..89e3eb64df9b 100644
--- a/node/subsystem-util/src/reputation.rs
+++ b/node/subsystem-util/src/reputation.rs
@@ -48,7 +48,8 @@ impl ReputationAggregator {
 	///
 	/// * `send_immediately_if` - A function, takes `UnifiedReputationChange`,
 	/// results shows if we need to send the changes right away.
-	/// By default, it is used for sending `UnifiedReputationChange::Malicious` changes immediately and for testing.
+	/// By default, it is used for sending `UnifiedReputationChange::Malicious` changes immediately
+	/// and for testing.
 	pub fn new(send_immediately_if: fn(UnifiedReputationChange) -> bool) -> Self {
 		Self { by_peer: Default::default(), send_immediately_if }
 	}
diff --git a/node/test/client/src/block_builder.rs b/node/test/client/src/block_builder.rs
index 88160e782a70..0987cef55c1f 100644
--- a/node/test/client/src/block_builder.rs
+++ b/node/test/client/src/block_builder.rs
@@ -32,15 +32,16 @@ use sp_state_machine::BasicExternalities;
 pub trait InitPolkadotBlockBuilder {
 	/// Init a Polkadot specific block builder that works for the test runtime.
 	///
-	/// This will automatically create and push the inherents for you to make the block valid for the test runtime.
+	/// This will automatically create and push the inherents for you to make the block valid for
+	/// the test runtime.
 	fn init_polkadot_block_builder(
 		&self,
 	) -> sc_block_builder::BlockBuilder<Block, Client, FullBackend>;
 
 	/// Init a Polkadot specific block builder at a specific block that works for the test runtime.
 	///
-	/// Same as [`InitPolkadotBlockBuilder::init_polkadot_block_builder`] besides that it takes a [`BlockId`] to say
-	/// which should be the parent block of the block that is being build.
+	/// Same as [`InitPolkadotBlockBuilder::init_polkadot_block_builder`] besides that it takes a
+	/// [`BlockId`] to say which should be the parent block of the block that is being built.
 	fn init_polkadot_block_builder_at(
 		&self,
 		hash: <Block as BlockT>::Hash,
@@ -60,7 +61,8 @@ impl InitPolkadotBlockBuilder for Client {
 		let last_timestamp =
 			self.runtime_api().get_last_timestamp(hash).expect("Get last timestamp");
 
-		// `MinimumPeriod` is a storage parameter type that requires externalities to access the value.
+		// `MinimumPeriod` is a storage parameter type that requires externalities to access the
+		// value.
 		let minimum_period = BasicExternalities::new_empty()
 			.execute_with(|| polkadot_test_runtime::MinimumPeriod::get());
 
@@ -73,7 +75,8 @@ impl InitPolkadotBlockBuilder for Client {
 			last_timestamp + minimum_period
 		};
 
-		// `SlotDuration` is a storage parameter type that requires externalities to access the value.
+		// `SlotDuration` is a storage parameter type that requires externalities to access the
+		// value.
 		let slot_duration = BasicExternalities::new_empty()
 			.execute_with(|| polkadot_test_runtime::SlotDuration::get());
 
@@ -130,9 +133,9 @@ impl InitPolkadotBlockBuilder for Client {
 pub trait BlockBuilderExt {
 	/// Push a Polkadot test runtime specific extrinsic to the block.
 	///
-	/// This will internally use the [`BlockBuilder::push`] method, but this method expects a opaque extrinsic. So,
-	/// we provide this wrapper which converts a test runtime specific extrinsic to a opaque extrinsic and pushes it to
-	/// the block.
+	/// This will internally use the [`BlockBuilder::push`] method, but this method expects an opaque
+	/// extrinsic. So, we provide this wrapper which converts a test runtime specific extrinsic to an
+	/// opaque extrinsic and pushes it to the block.
 	///
 	/// Returns the result of the application of the extrinsic.
 	fn push_polkadot_extrinsic(
diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs
index a2c1b1941003..ed25d28d2925 100644
--- a/node/test/service/src/lib.rs
+++ b/node/test/service/src/lib.rs
@@ -257,7 +257,8 @@ pub struct PolkadotTestNode {
 	pub client: Arc<Client>,
 	/// A handle to Overseer.
 	pub overseer_handle: Handle,
-	/// The `MultiaddrWithPeerId` to this node. This is useful if you want to pass it as "boot node" to other nodes.
+	/// The `MultiaddrWithPeerId` to this node. This is useful if you want to pass it as "boot
+	/// node" to other nodes.
 	pub addr: MultiaddrWithPeerId,
 	/// `RPCHandlers` to make RPC queries.
 	pub rpc_handlers: RpcHandlers,
@@ -312,14 +313,15 @@ impl PolkadotTestNode {
 		self.send_sudo(call, Sr25519Keyring::Alice, 1).await
 	}
 
-	/// Wait for `count` blocks to be imported in the node and then exit. This function will not return if no blocks
-	/// are ever created, thus you should restrict the maximum amount of time of the test execution.
+	/// Wait for `count` blocks to be imported in the node and then exit. This function will not
+	/// return if no blocks are ever created, thus you should restrict the maximum amount of time of
+	/// the test execution.
 	pub fn wait_for_blocks(&self, count: usize) -> impl Future<Output = ()> {
 		self.client.wait_for_blocks(count)
 	}
 
-	/// Wait for `count` blocks to be finalized and then exit. Similarly with `wait_for_blocks` this function will
-	/// not return if no block are ever finalized.
+	/// Wait for `count` blocks to be finalized and then exit. Similarly with `wait_for_blocks` this
+	/// function will not return if no blocks are ever finalized.
 	pub async fn wait_for_finalized_blocks(&self, count: usize) {
 		let mut import_notification_stream = self.client.finality_notification_stream();
 		let mut blocks = HashSet::new();
diff --git a/parachain/src/primitives.rs b/parachain/src/primitives.rs
index 18da89aa97a1..55577618c469 100644
--- a/parachain/src/primitives.rs
+++ b/parachain/src/primitives.rs
@@ -287,13 +287,13 @@ impl IsSystem for Sibling {
 	}
 }
 
-/// A type that uniquely identifies an HRMP channel. An HRMP channel is established between two paras.
-/// In text, we use the notation `(A, B)` to specify a channel between A and B. The channels are
-/// unidirectional, meaning that `(A, B)` and `(B, A)` refer to different channels. The convention is
-/// that we use the first item tuple for the sender and the second for the recipient. Only one channel
-/// is allowed between two participants in one direction, i.e. there cannot be 2 different channels
-/// identified by `(A, B)`. A channel with the same para id in sender and recipient is invalid. That
-/// is, however, not enforced.
+/// A type that uniquely identifies an HRMP channel. An HRMP channel is established between two
+/// paras. In text, we use the notation `(A, B)` to specify a channel between A and B. The channels
+/// are unidirectional, meaning that `(A, B)` and `(B, A)` refer to different channels. The
+/// convention is that we use the first item tuple for the sender and the second for the recipient.
+/// Only one channel is allowed between two participants in one direction, i.e. there cannot be 2
+/// different channels identified by `(A, B)`. A channel with the same para id in sender and
+/// recipient is invalid. That is, however, not enforced.
 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Encode, Decode, RuntimeDebug, TypeInfo)]
 #[cfg_attr(feature = "std", derive(Hash))]
 pub struct HrmpChannelId {
@@ -414,6 +414,7 @@ pub struct ValidationResult {
 	///
 	/// It is expected that the Parachain processes them from first to last.
 	pub processed_downward_messages: u32,
-	/// The mark which specifies the block number up to which all inbound HRMP messages are processed.
+	/// The mark which specifies the block number up to which all inbound HRMP messages are
+	/// processed.
 	pub hrmp_watermark: RelayChainBlockNumber,
 }
diff --git a/parachain/test-parachains/adder/collator/src/lib.rs b/parachain/test-parachains/adder/collator/src/lib.rs
index 02a4598f9e47..1ac561dda2ba 100644
--- a/parachain/test-parachains/adder/collator/src/lib.rs
+++ b/parachain/test-parachains/adder/collator/src/lib.rs
@@ -147,7 +147,8 @@ impl Collator {
 
 	/// Create the collation function.
 	///
-	/// This collation function can be plugged into the overseer to generate collations for the adder parachain.
+	/// This collation function can be plugged into the overseer to generate collations for the
+	/// adder parachain.
 	pub fn create_collation_function(
 		&self,
 		spawner: impl SpawnNamed + Clone + 'static,
@@ -228,8 +229,9 @@ impl Collator {
 
 	/// Wait until `seconded` collations of this collator are seconded by a parachain validator.
 	///
-	/// The internal counter isn't de-duplicating the collations when counting the number of seconded collations. This
-	/// means when one collation is seconded by X validators, we record X seconded messages.
+	/// The internal counter isn't de-duplicating the collations when counting the number of
+	/// seconded collations. This means when one collation is seconded by X validators, we record X
+	/// seconded messages.
 	pub async fn wait_for_seconded_collations(&self, seconded: u32) {
 		let seconded_collations = self.seconded_collations.clone();
 		loop {
diff --git a/parachain/test-parachains/adder/collator/tests/integration.rs b/parachain/test-parachains/adder/collator/tests/integration.rs
index 9ab1c0c337a6..b891b29db59c 100644
--- a/parachain/test-parachains/adder/collator/tests/integration.rs
+++ b/parachain/test-parachains/adder/collator/tests/integration.rs
@@ -19,7 +19,8 @@
 
 const PUPPET_EXE: &str = env!("CARGO_BIN_EXE_adder_collator_puppet_worker");
 
-// If this test is failing, make sure to run all tests with the `real-overseer` feature being enabled.
+// If this test is failing, make sure to run all tests with the `real-overseer` feature being
+// enabled.
 
 #[substrate_test_utils::test(flavor = "multi_thread")]
 async fn collating_using_adder_collator() {
diff --git a/parachain/test-parachains/undying/collator/src/lib.rs b/parachain/test-parachains/undying/collator/src/lib.rs
index 838590fa16f5..cc0f592dc253 100644
--- a/parachain/test-parachains/undying/collator/src/lib.rs
+++ b/parachain/test-parachains/undying/collator/src/lib.rs
@@ -221,7 +221,8 @@ impl Collator {
 
 	/// Create the collation function.
 	///
-	/// This collation function can be plugged into the overseer to generate collations for the undying parachain.
+	/// This collation function can be plugged into the overseer to generate collations for the
+	/// undying parachain.
 	pub fn create_collation_function(
 		&self,
 		spawner: impl SpawnNamed + Clone + 'static,
@@ -309,8 +310,9 @@ impl Collator {
 
 	/// Wait until `seconded` collations of this collator are seconded by a parachain validator.
 	///
-	/// The internal counter isn't de-duplicating the collations when counting the number of seconded collations. This
-	/// means when one collation is seconded by X validators, we record X seconded messages.
+	/// The internal counter isn't de-duplicating the collations when counting the number of
+	/// seconded collations. This means when one collation is seconded by X validators, we record X
+	/// seconded messages.
 	pub async fn wait_for_seconded_collations(&self, seconded: u32) {
 		let seconded_collations = self.seconded_collations.clone();
 		loop {
diff --git a/parachain/test-parachains/undying/collator/tests/integration.rs b/parachain/test-parachains/undying/collator/tests/integration.rs
index 8ca6eec9aa62..21d174fb06c7 100644
--- a/parachain/test-parachains/undying/collator/tests/integration.rs
+++ b/parachain/test-parachains/undying/collator/tests/integration.rs
@@ -19,7 +19,8 @@
 
 const PUPPET_EXE: &str = env!("CARGO_BIN_EXE_undying_collator_puppet_worker");
 
-// If this test is failing, make sure to run all tests with the `real-overseer` feature being enabled.
+// If this test is failing, make sure to run all tests with the `real-overseer` feature being
+// enabled.
 #[substrate_test_utils::test(flavor = "multi_thread")]
 async fn collating_using_undying_collator() {
 	use polkadot_primitives::Id as ParaId;
diff --git a/primitives/src/runtime_api.rs b/primitives/src/runtime_api.rs
index ec05beea9d5f..c3a150a642e0 100644
--- a/primitives/src/runtime_api.rs
+++ b/primitives/src/runtime_api.rs
@@ -30,10 +30,9 @@
 //! The versioning is achieved with the `api_version` attribute. It can be
 //! placed on:
 //! * trait declaration - represents the base version of the API.
-//! * method declaration (inside a trait declaration) - represents a versioned
-//!   method, which is not available in the base version.
-//! * trait implementation - represents which version of the API is being
-//!   implemented.
+//! * method declaration (inside a trait declaration) - represents a versioned method, which is not
+//!   available in the base version.
+//! * trait implementation - represents which version of the API is being implemented.
 //!
 //! Let's see a quick example:
 //!
@@ -90,14 +89,14 @@
 //! # How versioned methods are used for `ParachainHost`
 //!
 //! Let's introduce two types of `ParachainHost` API implementation:
-//! * stable - used on stable production networks like Polkadot and Kusama. There is only one
-//!   stable API at a single point in time.
+//! * stable - used on stable production networks like Polkadot and Kusama. There is only one stable
+//!   API at a single point in time.
 //! * staging - methods that are ready for production, but will be released on Rococo first. We can
 //!   batch together multiple changes and then release all of them to production, by making staging
 //!   production (bump base version). We can not change or remove any method in staging after a
-//!   release, as this would break Rococo. It should be ok to keep adding methods to staging
-//!   across several releases. For experimental methods, you have to keep them on a separate branch
-//!   until ready.
+//!   release, as this would break Rococo. It should be ok to keep adding methods to staging across
+//!   several releases. For experimental methods, you have to keep them on a separate branch until
+//!   ready.
 //!
 //! The stable version of `ParachainHost` is indicated by the base version of the API. Any staging
 //! method must use `api_version` attribute so that it is assigned to a specific version of a
@@ -111,8 +110,8 @@
 //! ```
 //! indicates a function from the stable `v2` API.
 //!
-//! All staging API functions should use primitives from `vstaging`. They should be clearly separated
-//! from the stable primitives.
+//! All staging API functions should use primitives from `vstaging`. They should be clearly
+//! separated from the stable primitives.
 
 use crate::{
 	vstaging, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash,
diff --git a/primitives/src/v5/metrics.rs b/primitives/src/v5/metrics.rs
index f947c7392dcb..97f7678e4373 100644
--- a/primitives/src/v5/metrics.rs
+++ b/primitives/src/v5/metrics.rs
@@ -164,8 +164,8 @@ pub mod metric_definitions {
 		};
 
 	/// Counts the number of `imported`, `current` and `concluded_invalid` dispute statements sets
-	/// processed in `process_inherent_data`. The `current` label refers to the disputes statement sets of
-	/// the current session.
+	/// processed in `process_inherent_data`. The `current` label refers to the disputes statement
+	/// sets of the current session.
 	pub const PARACHAIN_INHERENT_DATA_DISPUTE_SETS_PROCESSED: CounterVecDefinition =
 		CounterVecDefinition {
 			name: "polkadot_parachain_inherent_data_dispute_sets_processed",
@@ -174,7 +174,8 @@ pub mod metric_definitions {
 			labels: &["category"],
 		};
 
-	/// Counts the number of `valid` and `invalid` bitfields signature checked in `process_inherent_data`.
+	/// Counts the number of `valid` and `invalid` bitfields signature checked in
+	/// `process_inherent_data`.
 	pub const PARACHAIN_CREATE_INHERENT_BITFIELDS_SIGNATURE_CHECKS: CounterVecDefinition =
 		CounterVecDefinition {
 			name: "polkadot_parachain_create_inherent_bitfields_signature_checks",
@@ -183,7 +184,8 @@ pub mod metric_definitions {
 			labels: &["validity"],
 		};
 
-	/// Measures how much time does it take to verify a single validator signature of a dispute statement
+	/// Measures how much time it takes to verify a single validator signature of a dispute
+	/// statement
 	pub const PARACHAIN_VERIFY_DISPUTE_SIGNATURE: HistogramDefinition =
 		HistogramDefinition {
 			name: "polkadot_parachain_verify_dispute_signature",
diff --git a/primitives/src/v5/mod.rs b/primitives/src/v5/mod.rs
index 3498c0762d4c..bdd10e623190 100644
--- a/primitives/src/v5/mod.rs
+++ b/primitives/src/v5/mod.rs
@@ -103,7 +103,8 @@ pub trait TypeIndex {
 	fn type_index(&self) -> usize;
 }
 
-/// Index of the validator is used as a lightweight replacement of the `ValidatorId` when appropriate.
+/// Index of the validator is used as a lightweight replacement of the `ValidatorId` when
+/// appropriate.
 #[derive(Eq, Ord, PartialEq, PartialOrd, Copy, Clone, Encode, Decode, TypeInfo, RuntimeDebug)]
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash))]
 pub struct ValidatorIndex(pub u32);
@@ -589,25 +590,27 @@ impl Ord for CommittedCandidateReceipt {
 	}
 }
 
-/// The validation data provides information about how to create the inputs for validation of a candidate.
-/// This information is derived from the chain state and will vary from para to para, although some
-/// fields may be the same for every para.
+/// The validation data provides information about how to create the inputs for validation of a
+/// candidate. This information is derived from the chain state and will vary from para to para,
+/// although some fields may be the same for every para.
 ///
-/// Since this data is used to form inputs to the validation function, it needs to be persisted by the
-/// availability system to avoid dependence on availability of the relay-chain state.
+/// Since this data is used to form inputs to the validation function, it needs to be persisted by
+/// the availability system to avoid dependence on availability of the relay-chain state.
 ///
-/// Furthermore, the validation data acts as a way to authorize the additional data the collator needs
-/// to pass to the validation function. For example, the validation function can check whether the incoming
-/// messages (e.g. downward messages) were actually sent by using the data provided in the validation data
-/// using so called MQC heads.
+/// Furthermore, the validation data acts as a way to authorize the additional data the collator
+/// needs to pass to the validation function. For example, the validation function can check whether
+/// the incoming messages (e.g. downward messages) were actually sent by using the data provided in
+/// the validation data using so called MQC heads.
 ///
-/// Since the commitments of the validation function are checked by the relay-chain, secondary checkers
-/// can rely on the invariant that the relay-chain only includes para-blocks for which these checks have
-/// already been done. As such, there is no need for the validation data used to inform validators and
-/// collators about the checks the relay-chain will perform to be persisted by the availability system.
+/// Since the commitments of the validation function are checked by the relay-chain, secondary
+/// checkers can rely on the invariant that the relay-chain only includes para-blocks for which
+/// these checks have already been done. As such, there is no need for the validation data used to
+/// inform validators and collators about the checks the relay-chain will perform to be persisted by
+/// the availability system.
 ///
-/// The `PersistedValidationData` should be relatively lightweight primarily because it is constructed
-/// during inclusion for each candidate and therefore lies on the critical path of inclusion.
+/// The `PersistedValidationData` should be relatively lightweight primarily because it is
+/// constructed during inclusion for each candidate and therefore lies on the critical path of
+/// inclusion.
 #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)]
 #[cfg_attr(feature = "std", derive(Default))]
 pub struct PersistedValidationData<H = Hash, N = BlockNumber> {
@@ -642,7 +645,8 @@ pub struct CandidateCommitments<N = BlockNumber> {
 	pub head_data: HeadData,
 	/// The number of messages processed from the DMQ.
 	pub processed_downward_messages: u32,
-	/// The mark which specifies the block number up to which all inbound HRMP messages are processed.
+	/// The mark which specifies the block number up to which all inbound HRMP messages are
+	/// processed.
 	pub hrmp_watermark: N,
 }
 
@@ -677,7 +681,8 @@ pub type UncheckedSignedAvailabilityBitfield = UncheckedSigned<AvailabilityBitfi
 
 /// A set of signed availability bitfields. Should be sorted by validator index, ascending.
 pub type SignedAvailabilityBitfields = Vec<SignedAvailabilityBitfield>;
-/// A set of unchecked signed availability bitfields. Should be sorted by validator index, ascending.
+/// A set of unchecked signed availability bitfields. Should be sorted by validator index,
+/// ascending.
 pub type UncheckedSignedAvailabilityBitfields = Vec<UncheckedSignedAvailabilityBitfield>;
 
 /// A backed (or backable, depending on context) candidate.
@@ -975,8 +980,9 @@ pub enum CoreState<H = Hash, N = BlockNumber> {
 	/// variant.
 	#[codec(index = 1)]
 	Scheduled(ScheduledCore),
-	/// The core is currently free and there is nothing scheduled. This can be the case for parathread
-	/// cores when there are no parathread blocks queued. Parachain cores will never be left idle.
+	/// The core is currently free and there is nothing scheduled. This can be the case for
+	/// parathread cores when there are no parathread blocks queued. Parachain cores will never be
+	/// left idle.
 	#[codec(index = 2)]
 	Free,
 }
@@ -1079,8 +1085,8 @@ impl From<ValidityError> for u8 {
 	}
 }
 
-/// Abridged version of `HostConfiguration` (from the `Configuration` parachains host runtime module)
-/// meant to be used by a parachain or PDK such as cumulus.
+/// Abridged version of `HostConfiguration` (from the `Configuration` parachains host runtime
+/// module) meant to be used by a parachain or PDK such as cumulus.
 #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
 #[cfg_attr(feature = "std", derive(PartialEq))]
 pub struct AbridgedHostConfiguration {
@@ -1156,17 +1162,18 @@ pub enum UpgradeRestriction {
 #[derive(Copy, Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
 pub enum UpgradeGoAhead {
 	/// Abort the upgrade process. There is something wrong with the validation code previously
-	/// submitted by the parachain. This variant can also be used to prevent upgrades by the governance
-	/// should an emergency emerge.
+	/// submitted by the parachain. This variant can also be used to prevent upgrades by the
+	/// governance should an emergency emerge.
 	///
 	/// The expected reaction on this variant is that the parachain will admit this message and
 	/// remove all the data about the pending upgrade. Depending on the nature of the problem (to
-	/// be examined offchain for now), it can try to send another validation code or just retry later.
+	/// be examined offchain for now), it can try to send another validation code or just retry
+	/// later.
 	#[codec(index = 0)]
 	Abort,
-	/// Apply the pending code change. The parablock that is built on a relay-parent that is descendant
-	/// of the relay-parent where the parachain observed this signal must use the upgraded validation
-	/// code.
+	/// Apply the pending code change. The parablock that is built on a relay-parent that is
+	/// descendant of the relay-parent where the parachain observed this signal must use the
+	/// upgraded validation code.
 	#[codec(index = 1)]
 	GoAhead,
 }
@@ -1646,7 +1653,7 @@ pub const fn supermajority_threshold(n: usize) -> usize {
 #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
 #[cfg_attr(feature = "std", derive(PartialEq))]
 pub struct SessionInfo {
-	/****** New in v2 *******/
+	/****** New in v2 ****** */
 	/// All the validators actively participating in parachain consensus.
 	/// Indices are into the broader validator set.
 	pub active_validator_indices: Vec<ValidatorIndex>,
@@ -1655,11 +1662,11 @@ pub struct SessionInfo {
 	/// The amount of sessions to keep for disputes.
 	pub dispute_period: SessionIndex,
 
-	/****** Old fields ******/
+	/****** Old fields ***** */
 	/// Validators in canonical ordering.
 	///
-	/// NOTE: There might be more authorities in the current session, than `validators` participating
-	/// in parachain consensus. See
+	/// NOTE: There might be more authorities in the current session, than `validators`
+	/// participating in parachain consensus. See
 	/// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148).
 	///
 	/// `SessionInfo::validators` will be limited to to `max_validators` when set.
@@ -1667,8 +1674,8 @@ pub struct SessionInfo {
 	/// Validators' authority discovery keys for the session in canonical ordering.
 	///
 	/// NOTE: The first `validators.len()` entries will match the corresponding validators in
-	/// `validators`, afterwards any remaining authorities can be found. This is any authorities not
-	/// participating in parachain consensus - see
+	/// `validators`, afterwards any remaining authorities can be found. This is any authorities
+	/// not participating in parachain consensus - see
 	/// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148)
 	pub discovery_keys: Vec<AuthorityDiscoveryId>,
 	/// The assignment keys for validators.
@@ -1679,8 +1686,8 @@ pub struct SessionInfo {
 	///
 	/// Therefore:
 	/// ```ignore
-	///		assignment_keys.len() == validators.len() && validators.len() <= discovery_keys.len()
-	///	```
+	/// 		assignment_keys.len() == validators.len() && validators.len() <= discovery_keys.len()
+	/// 	```
 	pub assignment_keys: Vec<AssignmentId>,
 	/// Validators in shuffled ordering - these are the validator groups as produced
 	/// by the `Scheduler` module for the session and are typically referred to by
diff --git a/primitives/test-helpers/src/lib.rs b/primitives/test-helpers/src/lib.rs
index ac7af5b5fa7d..a8fc0f7ccc26 100644
--- a/primitives/test-helpers/src/lib.rs
+++ b/primitives/test-helpers/src/lib.rs
@@ -17,7 +17,8 @@
 #![forbid(unused_crate_dependencies)]
 #![forbid(unused_extern_crates)]
 
-//! A set of primitive constructors, to aid in crafting meaningful testcase while reducing repetition.
+//! A set of primitive constructors, to aid in crafting meaningful testcase while reducing
+//! repetition.
 //!
 //! Note that `dummy_` prefixed values are meant to be fillers, that should not matter, and will
 //! contain randomness based data.
diff --git a/runtime/common/slot_range_helper/src/lib.rs b/runtime/common/slot_range_helper/src/lib.rs
index 626232032fbd..bbe5b61ae1f3 100644
--- a/runtime/common/slot_range_helper/src/lib.rs
+++ b/runtime/common/slot_range_helper/src/lib.rs
@@ -36,15 +36,15 @@ pub use sp_std::{ops::Add, result};
 ///
 /// This will generate an enum `SlotRange` with the following properties:
 ///
-/// * Enum variants will range from all consecutive combinations of inputs, i.e.
-///   `ZeroZero`, `ZeroOne`, `ZeroTwo`, `ZeroThree`, `OneOne`, `OneTwo`, `OneThree`...
+/// * Enum variants will range from all consecutive combinations of inputs, i.e. `ZeroZero`,
+///   `ZeroOne`, `ZeroTwo`, `ZeroThree`, `OneOne`, `OneTwo`, `OneThree`...
 /// * A constant `LEASE_PERIODS_PER_SLOT` will count the number of lease periods.
 /// * A constant `SLOT_RANGE_COUNT` will count the total number of enum variants.
 /// * A function `as_pair` will return a tuple representation of the `SlotRange`.
 /// * A function `intersects` will tell you if two slot ranges intersect with one another.
 /// * A function `len` will tell you the length of occupying a `SlotRange`.
-/// * A function `new_bounded` will generate a `SlotRange` from an input of the current
-///   lease period, the starting lease period, and the final lease period.
+/// * A function `new_bounded` will generate a `SlotRange` from an input of the current lease
+///   period, the starting lease period, and the final lease period.
 #[macro_export]
 macro_rules! generate_slot_range{
 	// Entry point
diff --git a/runtime/common/src/assigned_slots.rs b/runtime/common/src/assigned_slots.rs
index 4424738c9835..b3c1381c9ec9 100644
--- a/runtime/common/src/assigned_slots.rs
+++ b/runtime/common/src/assigned_slots.rs
@@ -322,7 +322,8 @@ pub mod pallet {
 					},
 					Err(err) => {
 						// Treat failed lease creation as warning .. slot will be allocated a lease
-						// in a subsequent lease period by the `allocate_temporary_slot_leases` function.
+						// in a subsequent lease period by the `allocate_temporary_slot_leases`
+						// function.
 						log::warn!(target: "assigned_slots",
 							"Failed to allocate a temp slot for para {:?} at period {:?}: {:?}",
 							id, current_lease_period, err
@@ -398,7 +399,8 @@ impl<T: Config> Pallet<T> {
 	/// total number of lease (lower first), and then when they last a turn (older ones first).
 	/// If any remaining ex-aequo, we just take the para ID in ascending order as discriminator.
 	///
-	/// Assigned slots with a `period_begin` bigger than current lease period are not considered (yet).
+	/// Assigned slots with a `period_begin` bigger than current lease period are not considered
+	/// (yet).
 	///
 	/// The function will call out to `Leaser::lease_out` to create the appropriate slot leases.
 	fn allocate_temporary_slot_leases(lease_period_index: LeasePeriodOf<T>) -> DispatchResult {
@@ -525,7 +527,8 @@ impl<T: Config> Pallet<T> {
 
 	/// Handles start of a lease period.
 	fn manage_lease_period_start(lease_period_index: LeasePeriodOf<T>) -> Weight {
-		// Note: leases that have ended in previous lease period, should have been cleaned in slots pallet.
+		// Note: leases that have ended in previous lease period, should have been cleaned in slots
+		// pallet.
 		if let Err(err) = Self::allocate_temporary_slot_leases(lease_period_index) {
 			log::error!(target: "assigned_slots",
 				"Allocating slots failed for lease period {:?}, with: {:?}",
diff --git a/runtime/common/src/auctions.rs b/runtime/common/src/auctions.rs
index 7ab12eec7998..901c9c27da28 100644
--- a/runtime/common/src/auctions.rs
+++ b/runtime/common/src/auctions.rs
@@ -138,8 +138,8 @@ pub mod pallet {
 		Reserved { bidder: T::AccountId, extra_reserved: BalanceOf<T>, total_amount: BalanceOf<T> },
 		/// Funds were unreserved since bidder is no longer active. `[bidder, amount]`
 		Unreserved { bidder: T::AccountId, amount: BalanceOf<T> },
-		/// Someone attempted to lease the same slot twice for a parachain. The amount is held in reserve
-		/// but no parachain slot has been leased.
+		/// Someone attempted to lease the same slot twice for a parachain. The amount is held in
+		/// reserve but no parachain slot has been leased.
 		ReserveConfiscated { para_id: ParaId, leaser: T::AccountId, amount: BalanceOf<T> },
 		/// A new bid has been accepted as the current winner.
 		BidAccepted {
@@ -149,7 +149,8 @@ pub mod pallet {
 			first_slot: LeasePeriodOf<T>,
 			last_slot: LeasePeriodOf<T>,
 		},
-		/// The winning offset was chosen for an auction. This will map into the `Winning` storage map.
+		/// The winning offset was chosen for an auction. This will map into the `Winning` storage
+		/// map.
 		WinningOffset { auction_index: AuctionIndex, block_number: BlockNumberFor<T> },
 	}
 
@@ -217,9 +218,9 @@ pub mod pallet {
 		fn on_initialize(n: BlockNumberFor<T>) -> Weight {
 			let mut weight = T::DbWeight::get().reads(1);
 
-			// If the current auction was in its ending period last block, then ensure that the (sub-)range
-			// winner information is duplicated from the previous block in case no bids happened in the
-			// last block.
+			// If the current auction was in its ending period last block, then ensure that the
+			// (sub-)range winner information is duplicated from the previous block in case no bids
+			// happened in the last block.
 			if let AuctionStatus::EndingPeriod(offset, _sub_sample) = Self::auction_status(n) {
 				weight = weight.saturating_add(T::DbWeight::get().reads(1));
 				if !Winning::<T>::contains_key(&offset) {
@@ -555,8 +556,9 @@ impl<T: Config> Pallet<T> {
 					});
 					let res = Winning::<T>::get(offset)
 						.unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]);
-					// This `remove_all` statement should remove at most `EndingPeriod` / `SampleLength` items,
-					// which should be bounded and sensibly configured in the runtime.
+					// This `remove_all` statement should remove at most `EndingPeriod` /
+					// `SampleLength` items, which should be bounded and sensibly configured in the
+					// runtime.
 					#[allow(deprecated)]
 					Winning::<T>::remove_all(None);
 					AuctionInfo::<T>::kill();
@@ -574,8 +576,8 @@ impl<T: Config> Pallet<T> {
 		auction_lease_period_index: LeasePeriodOf<T>,
 		winning_ranges: WinningData<T>,
 	) {
-		// First, unreserve all amounts that were reserved for the bids. We will later re-reserve the
-		// amounts from the bidders that ended up being assigned the slot so there's no need to
+		// First, unreserve all amounts that were reserved for the bids. We will later re-reserve
+		// the amounts from the bidders that ended up being assigned the slot so there's no need to
 		// special-case them here.
 		for ((bidder, _), amount) in ReservedAmounts::<T>::drain() {
 			CurrencyOf::<T>::unreserve(&bidder, amount);
@@ -596,12 +598,12 @@ impl<T: Config> Pallet<T> {
 				Err(LeaseError::ReserveFailed) |
 				Err(LeaseError::AlreadyEnded) |
 				Err(LeaseError::NoLeasePeriod) => {
-					// Should never happen since we just unreserved this amount (and our offset is from the
-					// present period). But if it does, there's not much we can do.
+					// Should never happen since we just unreserved this amount (and our offset is
+					// from the present period). But if it does, there's not much we can do.
 				},
 				Err(LeaseError::AlreadyLeased) => {
-					// The leaser attempted to get a second lease on the same para ID, possibly griefing us. Let's
-					// keep the amount reserved and let governance sort it out.
+					// The leaser attempted to get a second lease on the same para ID, possibly
+					// griefing us. Let's keep the amount reserved and let governance sort it out.
 					if CurrencyOf::<T>::reserve(&leaser, amount).is_ok() {
 						Self::deposit_event(Event::<T>::ReserveConfiscated {
 							para_id: para,
@@ -1123,11 +1125,11 @@ mod tests {
 				Auctions::auction_status(System::block_number()),
 				AuctionStatus::<u32>::EndingPeriod(2, 0)
 			);
-			// This will prevent the auction's winner from being decided in the next block, since the random
-			// seed was known before the final bids were made.
+			// This will prevent the auction's winner from being decided in the next block, since
+			// the random seed was known before the final bids were made.
 			set_last_random(H256::zero(), 8);
-			// Auction definitely ended now, but we don't know exactly when in the last 3 blocks yet since
-			// no randomness available yet.
+			// Auction definitely ended now, but we don't know exactly when in the last 3 blocks yet
+			// since no randomness available yet.
 			run_to_block(9);
 			// Auction has now ended... But auction winner still not yet decided, so no leases yet.
 			assert_eq!(
@@ -1136,8 +1138,8 @@ mod tests {
 			);
 			assert_eq!(leases(), vec![]);
 
-			// Random seed now updated to a value known at block 9, when the auction ended. This means
-			// that the winner can now be chosen.
+			// Random seed now updated to a value known at block 9, when the auction ended. This
+			// means that the winner can now be chosen.
 			set_last_random(H256::zero(), 9);
 			run_to_block(10);
 			// Auction ended and winner selected
diff --git a/runtime/common/src/claims.rs b/runtime/common/src/claims.rs
index 6a41a8f3f472..9cc06b2bede2 100644
--- a/runtime/common/src/claims.rs
+++ b/runtime/common/src/claims.rs
@@ -193,8 +193,8 @@ pub mod pallet {
 		SignerHasNoClaim,
 		/// Account ID sending transaction has no claim.
 		SenderHasNoClaim,
-		/// There's not enough in the pot to pay out some unvested amount. Generally implies a logic
-		/// error.
+		/// There's not enough in the pot to pay out some unvested amount. Generally implies a
+		/// logic error.
 		PotUnderflow,
 		/// A needed statement was not included.
 		InvalidStatement,
@@ -288,8 +288,8 @@ pub mod pallet {
 		///
 		/// Parameters:
 		/// - `dest`: The destination account to payout the claim.
-		/// - `ethereum_signature`: The signature of an ethereum signed message
-		///    matching the format described above.
+		/// - `ethereum_signature`: The signature of an ethereum signed message matching the format
+		///   described above.
 		///
 		/// <weight>
 		/// The weight of this call is invariant over the input parameters.
@@ -368,9 +368,10 @@ pub mod pallet {
 		///
 		/// Parameters:
 		/// - `dest`: The destination account to payout the claim.
-		/// - `ethereum_signature`: The signature of an ethereum signed message
-		///    matching the format described above.
-		/// - `statement`: The identity of the statement which is being attested to in the signature.
+		/// - `ethereum_signature`: The signature of an ethereum signed message matching the format
+		///   described above.
+		/// - `statement`: The identity of the statement which is being attested to in the
+		///   signature.
 		///
 		/// <weight>
 		/// The weight of this call is invariant over the input parameters.
@@ -400,14 +401,16 @@ pub mod pallet {
 
 		/// Attest to a statement, needed to finalize the claims process.
 		///
-		/// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a `SignedExtension`.
+		/// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a
+		/// `SignedExtension`.
 		///
 		/// Unsigned Validation:
 		/// A call to attest is deemed valid if the sender has a `Preclaim` registered
 		/// and provides a `statement` which is expected for the account.
 		///
 		/// Parameters:
-		/// - `statement`: The identity of the statement which is being attested to in the signature.
+		/// - `statement`: The identity of the statement which is being attested to in the
+		///   signature.
 		///
 		/// <weight>
 		/// The weight of this call is invariant over the input parameters.
diff --git a/runtime/common/src/crowdloan/migration.rs b/runtime/common/src/crowdloan/migration.rs
index 4a47f3283de3..03c4ab6c3119 100644
--- a/runtime/common/src/crowdloan/migration.rs
+++ b/runtime/common/src/crowdloan/migration.rs
@@ -134,8 +134,8 @@ pub mod crowdloan_index_migration {
 		Ok(())
 	}
 
-	/// This migration converts crowdloans to use a crowdloan index rather than the parachain id as a
-	/// unique identifier. This makes it easier to swap two crowdloans between parachains.
+	/// This migration converts crowdloans to use a crowdloan index rather than the parachain id as
+	/// a unique identifier. This makes it easier to swap two crowdloans between parachains.
 	pub fn migrate<T: Config>() -> frame_support::weights::Weight {
 		let mut weight = Weight::zero();
 
diff --git a/runtime/common/src/crowdloan/mod.rs b/runtime/common/src/crowdloan/mod.rs
index 18c86e68e5df..1db046c52701 100644
--- a/runtime/common/src/crowdloan/mod.rs
+++ b/runtime/common/src/crowdloan/mod.rs
@@ -45,9 +45,9 @@
 //! slot auction enters its ending period, then parachains will each place a bid; the bid will be
 //! raised once per block if the parachain had additional funds contributed since the last bid.
 //!
-//! Successful funds remain tracked (in the `Funds` storage item and the associated child trie) as long as
-//! the parachain remains active. Users can withdraw their funds once the slot is completed and funds are
-//! returned to the crowdloan account.
+//! Successful funds remain tracked (in the `Funds` storage item and the associated child trie) as
+//! long as the parachain remains active. Users can withdraw their funds once the slot is completed
+//! and funds are returned to the crowdloan account.
 
 pub mod migration;
 
@@ -164,11 +164,11 @@ pub struct FundInfo<AccountId, Balance, BlockNumber, LeasePeriod> {
 	/// If this is `Ending(n)`, this fund received a contribution during the current ending period,
 	/// where `n` is how far into the ending period the contribution was made.
 	pub last_contribution: LastContribution<BlockNumber>,
-	/// First lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same type
-	/// as `BlockNumber`.
+	/// First lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same
+	/// type as `BlockNumber`.
 	pub first_period: LeasePeriod,
-	/// Last lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same type
-	/// as `BlockNumber`.
+	/// Last lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same
+	/// type as `BlockNumber`.
 	pub last_period: LeasePeriod,
 	/// Unique index used to represent this fund.
 	pub fund_index: FundIndex,
@@ -192,15 +192,16 @@ pub mod pallet {
 	pub trait Config: frame_system::Config {
 		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
 
-		/// `PalletId` for the crowdloan pallet. An appropriate value could be `PalletId(*b"py/cfund")`
+		/// `PalletId` for the crowdloan pallet. An appropriate value could be
+		/// `PalletId(*b"py/cfund")`
 		#[pallet::constant]
 		type PalletId: Get<PalletId>;
 
 		/// The amount to be held on deposit by the depositor of a crowdloan.
 		type SubmissionDeposit: Get<BalanceOf<Self>>;
 
-		/// The minimum amount that may be contributed into a crowdloan. Should almost certainly be at
-		/// least `ExistentialDeposit`.
+		/// The minimum amount that may be contributed into a crowdloan. Should almost certainly be
+		/// at least `ExistentialDeposit`.
 		#[pallet::constant]
 		type MinContribution: Get<BalanceOf<Self>>;
 
@@ -208,8 +209,8 @@ pub mod pallet {
 		#[pallet::constant]
 		type RemoveKeysLimit: Get<u32>;
 
-		/// The parachain registrar type. We just use this to ensure that only the manager of a para is able to
-		/// start a crowdloan for its slot.
+		/// The parachain registrar type. We just use this to ensure that only the manager of a para
+		/// is able to start a crowdloan for its slot.
 		type Registrar: Registrar<AccountId = Self::AccountId>;
 
 		/// The type representing the auctioning system.
@@ -314,7 +315,8 @@ pub mod pallet {
 		FundNotEnded,
 		/// There are no contributions stored in this crowdloan.
 		NoContributions,
-		/// The crowdloan is not ready to dissolve. Potentially still has a slot or in retirement period.
+		/// The crowdloan is not ready to dissolve. Potentially still has a slot or in retirement
+		/// period.
 		NotReadyToDissolve,
 		/// Invalid signature.
 		InvalidSignature,
@@ -342,8 +344,9 @@ pub mod pallet {
 				for (fund, para_id) in
 					new_raise.into_iter().filter_map(|i| Self::funds(i).map(|f| (f, i)))
 				{
-					// Care needs to be taken by the crowdloan creator that this function will succeed given
-					// the crowdloaning configuration. We do some checks ahead of time in crowdloan `create`.
+					// Care needs to be taken by the crowdloan creator that this function will
+					// succeed given the crowdloaning configuration. We do some checks ahead of time
+					// in crowdloan `create`.
 					let result = T::Auctioneer::place_bid(
 						Self::fund_account_id(fund.fund_index),
 						para_id,
@@ -363,7 +366,8 @@ pub mod pallet {
 
 	#[pallet::call]
 	impl<T: Config> Pallet<T> {
-		/// Create a new crowdloaning campaign for a parachain slot with the given lease period range.
+		/// Create a new crowdloaning campaign for a parachain slot with the given lease period
+		/// range.
 		///
 		/// This applies a lock to your parachain configuration, ensuring that it cannot be changed
 		/// by the parachain manager.
@@ -462,16 +466,16 @@ pub mod pallet {
 		///
 		/// Origin must be signed, but can come from anyone.
 		///
-		/// The fund must be either in, or ready for, retirement. For a fund to be *in* retirement, then the retirement
-		/// flag must be set. For a fund to be ready for retirement, then:
+		/// The fund must be either in, or ready for, retirement. For a fund to be *in* retirement,
+		/// then the retirement flag must be set. For a fund to be ready for retirement, then:
 		/// - it must not already be in retirement;
 		/// - the amount of raised funds must be bigger than the _free_ balance of the account;
 		/// - and either:
 		///   - the block number must be at least `end`; or
 		///   - the current lease period must be greater than the fund's `last_period`.
 		///
-		/// In this case, the fund's retirement flag is set and its `end` is reset to the current block
-		/// number.
+		/// In this case, the fund's retirement flag is set and its `end` is reset to the current
+		/// block number.
 		///
 		/// - `who`: The account whose contribution should be withdrawn.
 		/// - `index`: The parachain to whose crowdloan the contribution was made.
@@ -653,8 +657,9 @@ pub mod pallet {
 			Ok(())
 		}
 
-		/// Contribute your entire balance to a crowd sale. This will transfer the entire balance of a user over to fund a parachain
-		/// slot. It will be withdrawable when the crowdloan has ended and the funds are unused.
+		/// Contribute your entire balance to a crowd sale. This will transfer the entire balance of
+		/// a user over to fund a parachain slot. It will be withdrawable when the crowdloan has
+		/// ended and the funds are unused.
 		#[pallet::call_index(8)]
 		#[pallet::weight(T::WeightInfo::contribute())]
 		pub fn contribute_all(
@@ -719,8 +724,8 @@ impl<T: Config> Pallet<T> {
 	}
 
 	/// This function checks all conditions which would qualify a crowdloan has ended.
-	/// * If we have reached the `fund.end` block OR the first lease period the fund is
-	///   trying to bid for has started already.
+	/// * If we have reached the `fund.end` block OR the first lease period the fund is trying to
+	///   bid for has started already.
 	/// * And, if the fund has enough free funds to refund full raised amount.
 	fn ensure_crowdloan_ended(
 		now: BlockNumberFor<T>,
@@ -775,8 +780,8 @@ impl<T: Config> Pallet<T> {
 			Error::<T>::BidOrLeaseActive
 		);
 
-		// We disallow any crowdloan contributions during the VRF Period, so that people do not sneak their
-		// contributions into the auction when it would not impact the outcome.
+		// We disallow any crowdloan contributions during the VRF Period, so that people do not
+		// sneak their contributions into the auction when it would not impact the outcome.
 		ensure!(!T::Auctioneer::auction_status(now).is_vrf(), Error::<T>::VrfDelayInProgress);
 
 		let (old_balance, memo) = Self::contribution_get(fund.fund_index, &who);
@@ -1287,7 +1292,8 @@ mod tests {
 			);
 
 			// Cannot create a crowdloan with nonsense end date
-			// This crowdloan would end in lease period 2, but is bidding for some slot that starts in lease period 1.
+			// This crowdloan would end in lease period 2, but is bidding for some slot that starts
+			// in lease period 1.
 			assert_noop!(
 				Crowdloan::create(RuntimeOrigin::signed(1), para, 1000, 1, 4, 41, None),
 				Error::<Test>::EndTooFarInFuture
@@ -1457,7 +1463,8 @@ mod tests {
 			let para_2 = new_para();
 			let index = NextFundIndex::<Test>::get();
 			assert_ok!(Crowdloan::create(RuntimeOrigin::signed(1), para_2, 1000, 1, 4, 40, None));
-			// Emulate a win by leasing out and putting a deposit. Slots pallet would normally do this.
+			// Emulate a win by leasing out and putting a deposit. Slots pallet would normally do
+			// this.
 			let crowdloan_account = Crowdloan::fund_account_id(index);
 			set_winner(para_2, crowdloan_account, true);
 			assert_noop!(
@@ -1465,8 +1472,8 @@ mod tests {
 				Error::<Test>::BidOrLeaseActive
 			);
 
-			// Move past lease period 1, should not be allowed to have further contributions with a crowdloan
-			// that has starting period 1.
+			// Move past lease period 1, should not be allowed to have further contributions with a
+			// crowdloan that has starting period 1.
 			let para_3 = new_para();
 			assert_ok!(Crowdloan::create(RuntimeOrigin::signed(1), para_3, 1000, 1, 4, 40, None));
 			run_to_block(40);
diff --git a/runtime/common/src/integration_tests.rs b/runtime/common/src/integration_tests.rs
index fa21fbf9ef69..34a49bc230b6 100644
--- a/runtime/common/src/integration_tests.rs
+++ b/runtime/common/src/integration_tests.rs
@@ -471,7 +471,8 @@ fn basic_end_to_end_works() {
 			);
 			assert_eq!(
 				slots::Leases::<Test>::get(ParaId::from(para_2)),
-				// -- 1 --- 2 --- 3 --- 4 --- 5 ---------------- 6 --------------------------- 7 ----------------
+				// -- 1 --- 2 --- 3 --- 4 --- 5 ---------------- 6 --------------------------- 7
+				// ----------------
 				vec![
 					None,
 					None,
@@ -599,7 +600,8 @@ fn basic_errors_fail() {
 
 #[test]
 fn competing_slots() {
-	// This test will verify that competing slots, from different sources will resolve appropriately.
+	// This test will verify that competing slots, from different sources will resolve
+	// appropriately.
 	new_test_ext().execute_with(|| {
 		assert!(System::block_number().is_one());
 		let max_bids = 10u32;
@@ -789,7 +791,8 @@ fn competing_bids() {
 		let crowdloan_1 = Crowdloan::fund_account_id(fund_1.fund_index);
 		assert_eq!(
 			slots::Leases::<Test>::get(ParaId::from(2000)),
-			// -- 1 --- 2 --- 3 --- 4 --- 5 ------------- 6 ------------------------ 7 -------------
+			// -- 1 --- 2 --- 3 --- 4 --- 5 ------------- 6 ------------------------ 7
+			// -------------
 			vec![
 				None,
 				None,
diff --git a/runtime/common/src/paras_registrar.rs b/runtime/common/src/paras_registrar.rs
index 550f443a5a78..57d9e21bcf53 100644
--- a/runtime/common/src/paras_registrar.rs
+++ b/runtime/common/src/paras_registrar.rs
@@ -107,9 +107,9 @@ pub mod pallet {
 		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
 
 		/// The aggregated origin type must support the `parachains` origin. We require that we can
-		/// infallibly convert between this origin and the system origin, but in reality, they're the
-		/// same type, we just can't express that to the Rust type system without writing a `where`
-		/// clause everywhere.
+		/// infallibly convert between this origin and the system origin, but in reality, they're
+		/// the same type, we just can't express that to the Rust type system without writing a
+		/// `where` clause everywhere.
 		type RuntimeOrigin: From<<Self as frame_system::Config>::RuntimeOrigin>
 			+ Into<result::Result<Origin, <Self as Config>::RuntimeOrigin>>;
 
@@ -163,14 +163,15 @@ pub mod pallet {
 		CannotDowngrade,
 		/// Cannot schedule upgrade of parathread to parachain
 		CannotUpgrade,
-		/// Para is locked from manipulation by the manager. Must use parachain or relay chain governance.
+		/// Para is locked from manipulation by the manager. Must use parachain or relay chain
+		/// governance.
 		ParaLocked,
 		/// The ID given for registration has not been reserved.
 		NotReserved,
 		/// Registering parachain with empty code is not allowed.
 		EmptyCode,
-		/// Cannot perform a parachain slot / lifecycle swap. Check that the state of both paras are
-		/// correct for the swap to work.
+		/// Cannot perform a parachain slot / lifecycle swap. Check that the state of both paras
+		/// are correct for the swap to work.
 		CannotSwap,
 	}
 
@@ -180,8 +181,8 @@ pub mod pallet {
 
 	/// Amount held on deposit for each para and the original depositor.
 	///
-	/// The given account ID is responsible for registering the code and initial head data, but may only do
-	/// so if it isn't yet registered. (After that, it's up to governance to do so.)
+	/// The given account ID is responsible for registering the code and initial head data, but may
+	/// only do so if it isn't yet registered. (After that, it's up to governance to do so.)
 	#[pallet::storage]
 	pub type Paras<T: Config> =
 		StorageMap<_, Twox64Concat, ParaId, ParaInfo<T::AccountId, BalanceOf<T>>>;
@@ -224,8 +225,8 @@ pub mod pallet {
 		/// - `validation_code`: The initial validation code of the parachain/thread.
 		///
 		/// ## Deposits/Fees
-		/// The origin signed account must reserve a corresponding deposit for the registration. Anything already
-		/// reserved previously for this para ID is accounted for.
+		/// The origin signed account must reserve a corresponding deposit for the registration.
+		/// Anything already reserved previously for this para ID is accounted for.
 		///
 		/// ## Events
 		/// The `Registered` event is emitted in case of success.
@@ -264,7 +265,8 @@ pub mod pallet {
 
 		/// Deregister a Para Id, freeing all data and returning any deposit.
 		///
-		/// The caller must be Root, the `para` owner, or the `para` itself. The para must be a parathread.
+		/// The caller must be Root, the `para` owner, or the `para` itself. The para must be a
+		/// parathread.
 		#[pallet::call_index(2)]
 		#[pallet::weight(<T as Config>::WeightInfo::deregister())]
 		pub fn deregister(origin: OriginFor<T>, id: ParaId) -> DispatchResult {
@@ -345,17 +347,20 @@ pub mod pallet {
 		/// Reserve a Para Id on the relay chain.
 		///
 		/// This function will reserve a new Para Id to be owned/managed by the origin account.
-		/// The origin account is able to register head data and validation code using `register` to create
-		/// a parathread. Using the Slots pallet, a parathread can then be upgraded to get a parachain slot.
+		/// The origin account is able to register head data and validation code using `register` to
+		/// create a parathread. Using the Slots pallet, a parathread can then be upgraded to get a
+		/// parachain slot.
 		///
 		/// ## Arguments
-		/// - `origin`: Must be called by a `Signed` origin. Becomes the manager/owner of the new para ID.
+		/// - `origin`: Must be called by a `Signed` origin. Becomes the manager/owner of the new
+		///   para ID.
 		///
 		/// ## Deposits/Fees
 		/// The origin must reserve a deposit of `ParaDeposit` for the registration.
 		///
 		/// ## Events
-		/// The `Reserved` event is emitted in case of success, which provides the ID reserved for use.
+		/// The `Reserved` event is emitted in case of success, which provides the ID reserved for
+		/// use.
 		#[pallet::call_index(5)]
 		#[pallet::weight(<T as Config>::WeightInfo::reserve())]
 		pub fn reserve(origin: OriginFor<T>) -> DispatchResult {
@@ -369,7 +374,8 @@ pub mod pallet {
 		/// Add a manager lock from a para. This will prevent the manager of a
 		/// para to deregister or swap a para.
 		///
-		/// Can be called by Root, the parachain, or the parachain manager if the parachain is unlocked.
+		/// Can be called by Root, the parachain, or the parachain manager if the parachain is
+		/// unlocked.
 		#[pallet::call_index(6)]
 		#[pallet::weight(T::DbWeight::get().reads_writes(1, 1))]
 		pub fn add_lock(origin: OriginFor<T>, para: ParaId) -> DispatchResult {
@@ -380,7 +386,8 @@ pub mod pallet {
 
 		/// Schedule a parachain upgrade.
 		///
-		/// Can be called by Root, the parachain, or the parachain manager if the parachain is unlocked.
+		/// Can be called by Root, the parachain, or the parachain manager if the parachain is
+		/// unlocked.
 		#[pallet::call_index(7)]
 		#[pallet::weight(<T as Config>::WeightInfo::schedule_code_upgrade(new_code.0.len() as u32))]
 		pub fn schedule_code_upgrade(
@@ -395,7 +402,8 @@ pub mod pallet {
 
 		/// Set the parachain's current head.
 		///
-		/// Can be called by Root, the parachain, or the parachain manager if the parachain is unlocked.
+		/// Can be called by Root, the parachain, or the parachain manager if the parachain is
+		/// unlocked.
 		#[pallet::call_index(8)]
 		#[pallet::weight(<T as Config>::WeightInfo::set_current_head(new_head.0.len() as u32))]
 		pub fn set_current_head(
diff --git a/runtime/common/src/paras_sudo_wrapper.rs b/runtime/common/src/paras_sudo_wrapper.rs
index 8944e932e9ef..d18eb8650aaf 100644
--- a/runtime/common/src/paras_sudo_wrapper.rs
+++ b/runtime/common/src/paras_sudo_wrapper.rs
@@ -45,8 +45,8 @@ pub mod pallet {
 		ParaDoesntExist,
 		/// The specified parachain or parathread is already registered.
 		ParaAlreadyExists,
-		/// A DMP message couldn't be sent because it exceeds the maximum size allowed for a downward
-		/// message.
+		/// A DMP message couldn't be sent because it exceeds the maximum size allowed for a
+		/// downward message.
 		ExceedsMaxMessageSize,
 		/// Could not schedule para cleanup.
 		CouldntCleanup,
@@ -127,8 +127,8 @@ pub mod pallet {
 
 		/// Send a downward XCM to the given para.
 		///
-		/// The given parachain should exist and the payload should not exceed the preconfigured size
-		/// `config.max_downward_message_size`.
+		/// The given parachain should exist and the payload should not exceed the preconfigured
+		/// size `config.max_downward_message_size`.
 		#[pallet::call_index(4)]
 		#[pallet::weight((1_000, DispatchClass::Operational))]
 		pub fn sudo_queue_downward_xcm(
diff --git a/runtime/common/src/purchase.rs b/runtime/common/src/purchase.rs
index 246511a5d3d8..72795a733ea9 100644
--- a/runtime/common/src/purchase.rs
+++ b/runtime/common/src/purchase.rs
@@ -82,7 +82,8 @@ pub struct AccountStatus<Balance> {
 	locked_balance: Balance,
 	/// Their sr25519/ed25519 signature verifying they have signed our required statement.
 	signature: Vec<u8>,
-	/// The percentage of VAT the purchaser is responsible for. This is already factored into account balance.
+	/// The percentage of VAT the purchaser is responsible for. This is already factored into
+	/// account balance.
 	vat: Permill,
 }
 
@@ -333,12 +334,14 @@ pub mod pallet {
 
 					if !status.locked_balance.is_zero() {
 						let unlock_block = UnlockBlock::<T>::get();
-						// We allow some configurable portion of the purchased locked DOTs to be unlocked for basic usage.
+						// We allow some configurable portion of the purchased locked DOTs to be
+						// unlocked for basic usage.
 						let unlocked = (T::UnlockedProportion::get() * status.locked_balance)
 							.min(T::MaxUnlocked::get());
 						let locked = status.locked_balance.saturating_sub(unlocked);
-						// We checked that this account has no existing vesting schedule. So this function should
-						// never fail, however if it does, not much we can do about it at this point.
+						// We checked that this account has no existing vesting schedule. So this
+						// function should never fail, however if it does, not much we can do about
+						// it at this point.
 						let _ = T::VestingSchedule::add_vesting_schedule(
 							// Apply vesting schedule to this user
 							&who,
@@ -351,7 +354,8 @@ pub mod pallet {
 						);
 					}
 
-					// Setting the user account to `Completed` ends the purchase process for this user.
+					// Setting the user account to `Completed` ends the purchase process for this
+					// user.
 					status.validity = AccountValidity::Completed;
 					Self::deposit_event(Event::<T>::PaymentComplete {
 						who: who.clone(),
@@ -645,17 +649,20 @@ mod tests {
 	}
 
 	fn alice_signature() -> [u8; 64] {
-		// echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Alice"
+		// echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold
+		// race lonely fit walk//Alice"
 		hex_literal::hex!("20e0faffdf4dfe939f2faa560f73b1d01cde8472e2b690b7b40606a374244c3a2e9eb9c8107c10b605138374003af8819bd4387d7c24a66ee9253c2e688ab881")
 	}
 
 	fn bob_signature() -> [u8; 64] {
-		// echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Bob"
+		// echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold
+		// race lonely fit walk//Bob"
 		hex_literal::hex!("d6d460187ecf530f3ec2d6e3ac91b9d083c8fbd8f1112d92a82e4d84df552d18d338e6da8944eba6e84afaacf8a9850f54e7b53a84530d649be2e0119c7ce889")
 	}
 
 	fn alice_signature_ed25519() -> [u8; 64] {
-		// echo -n "Hello, World" | subkey -e sign "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Alice"
+		// echo -n "Hello, World" | subkey -e sign "bottom drive obey lake curtain smoke basket hold
+		// race lonely fit walk//Alice"
 		hex_literal::hex!("ee3f5a6cbfc12a8f00c18b811dc921b550ddf272354cda4b9a57b1d06213fcd8509f5af18425d39a279d13622f14806c3e978e2163981f2ec1c06e9628460b0e")
 	}
 
diff --git a/runtime/common/src/slots/mod.rs b/runtime/common/src/slots/mod.rs
index 0be75fcba2b1..b4e136b1211c 100644
--- a/runtime/common/src/slots/mod.rs
+++ b/runtime/common/src/slots/mod.rs
@@ -14,12 +14,13 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-//! Parathread and parachains leasing system. Allows para IDs to be claimed, the code and data to be initialized and
-//! parachain slots (i.e. continuous scheduling) to be leased. Also allows for parachains and parathreads to be
-//! swapped.
+//! Parathread and parachains leasing system. Allows para IDs to be claimed, the code and data to be
+//! initialized and parachain slots (i.e. continuous scheduling) to be leased. Also allows for
+//! parachains and parathreads to be swapped.
 //!
-//! This doesn't handle the mechanics of determining which para ID actually ends up with a parachain lease. This
-//! must handled by a separately, through the trait interface that this pallet provides or the root dispatchables.
+//! This doesn't handle the mechanics of determining which para ID actually ends up with a parachain
+//! lease. This must be handled separately, through the trait interface that this pallet provides
+//! or the root dispatchables.
 
 pub mod migration;
 
@@ -98,8 +99,8 @@ pub mod pallet {
 
 	/// Amounts held on deposit for each (possibly future) leased parachain.
 	///
-	/// The actual amount locked on its behalf by any account at any time is the maximum of the second values
-	/// of the items in this list whose first value is the account.
+	/// The actual amount locked on its behalf by any account at any time is the maximum of the
+	/// second values of the items in this list whose first value is the account.
 	///
 	/// The first item in the list is the amount locked for the current Lease Period. Following
 	/// items are for the subsequent lease periods.
@@ -160,8 +161,8 @@ pub mod pallet {
 
 	#[pallet::call]
 	impl<T: Config> Pallet<T> {
-		/// Just a connect into the `lease_out` call, in case Root wants to force some lease to happen
-		/// independently of any other on-chain mechanism to use it.
+		/// Just a connect into the `lease_out` call, in case Root wants to force some lease to
+		/// happen independently of any other on-chain mechanism to use it.
 		///
 		/// The dispatch origin for this call must match `T::ForceOrigin`.
 		#[pallet::call_index(0)]
@@ -268,8 +269,8 @@ impl<T: Config> Pallet<T> {
 					// deposit for the parachain.
 					let now_held = Self::deposit_held(para, &ended_lease.0);
 
-					// If this is less than what we were holding for this leaser's now-ended lease, then
-					// unreserve it.
+					// If this is less than what we were holding for this leaser's now-ended lease,
+					// then unreserve it.
 					if let Some(rebate) = ended_lease.1.checked_sub(&now_held) {
 						T::Currency::unreserve(&ended_lease.0, rebate);
 					}
@@ -392,8 +393,8 @@ impl<T: Config> Leaser<BlockNumberFor<T>> for Pallet<T> {
 				}
 			}
 
-			// Figure out whether we already have some funds of `leaser` held in reserve for `para_id`.
-			//  If so, then we can deduct those from the amount that we need to reserve.
+			// Figure out whether we already have some funds of `leaser` held in reserve for
+			// `para_id`.  If so, then we can deduct those from the amount that we need to reserve.
 			let maybe_additional = amount.checked_sub(&Self::deposit_held(para, &leaser));
 			if let Some(ref additional) = maybe_additional {
 				T::Currency::reserve(&leaser, *additional)
@@ -403,7 +404,8 @@ impl<T: Config> Leaser<BlockNumberFor<T>> for Pallet<T> {
 			let reserved = maybe_additional.unwrap_or_default();
 
 			// Check if current lease period is same as period begin, and onboard them directly.
-			// This will allow us to support onboarding new parachains in the middle of a lease period.
+			// This will allow us to support onboarding new parachains in the middle of a lease
+			// period.
 			if current_lease_period == period_begin {
 				// Best effort. Not much we can do if this fails.
 				let _ = T::Registrar::make_parachain(para);
@@ -481,7 +483,8 @@ impl<T: Config> Leaser<BlockNumberFor<T>> for Pallet<T> {
 			None => return true,
 		};
 
-		// Get the leases, and check each item in the vec which is part of the range we are checking.
+		// Get the leases, and check each item in the vec which is part of the range we are
+		// checking.
 		let leases = Leases::<T>::get(para_id);
 		for slot in offset..=offset + period_count {
 			if let Some(Some(_)) = leases.get(slot) {
diff --git a/runtime/common/src/traits.rs b/runtime/common/src/traits.rs
index f24a5b977968..940c3dfa2fb3 100644
--- a/runtime/common/src/traits.rs
+++ b/runtime/common/src/traits.rs
@@ -113,11 +113,12 @@ pub trait Leaser<BlockNumber> {
 	///
 	/// `leaser` shall have a total of `amount` balance reserved by the implementer of this trait.
 	///
-	/// Note: The implementer of the trait (the leasing system) is expected to do all reserve/unreserve calls. The
-	/// caller of this trait *SHOULD NOT* pre-reserve the deposit (though should ensure that it is reservable).
+	/// Note: The implementer of the trait (the leasing system) is expected to do all
+	/// reserve/unreserve calls. The caller of this trait *SHOULD NOT* pre-reserve the deposit
+	/// (though should ensure that it is reservable).
 	///
-	/// The lease will last from `period_begin` for `period_count` lease periods. It is undefined if the `para`
-	/// already has a slot leased during those periods.
+	/// The lease will last from `period_begin` for `period_count` lease periods. It is undefined if
+	/// the `para` already has a slot leased during those periods.
 	///
 	/// Returns `Err` in the case of an error, and in which case nothing is changed.
 	fn lease_out(
@@ -128,8 +129,8 @@ pub trait Leaser<BlockNumber> {
 		period_count: Self::LeasePeriod,
 	) -> Result<(), LeaseError>;
 
-	/// Return the amount of balance currently held in reserve on `leaser`'s account for leasing `para`. This won't
-	/// go down outside a lease period.
+	/// Return the amount of balance currently held in reserve on `leaser`'s account for leasing
+	/// `para`. This won't go down outside a lease period.
 	fn deposit_held(
 		para: ParaId,
 		leaser: &Self::AccountId,
@@ -147,7 +148,8 @@ pub trait Leaser<BlockNumber> {
 	fn lease_period_index(block: BlockNumber) -> Option<(Self::LeasePeriod, bool)>;
 
 	/// Returns true if the parachain already has a lease in any of lease periods in the inclusive
-	/// range `[first_period, last_period]`, intersected with the unbounded range [`current_lease_period`..] .
+	/// range `[first_period, last_period]`, intersected with the unbounded range
+	/// [`current_lease_period`..].
 	fn already_leased(
 		para_id: ParaId,
 		first_period: Self::LeasePeriod,
@@ -169,7 +171,8 @@ pub enum AuctionStatus<BlockNumber> {
 	/// will be `EndingPeriod(1, 5)`.
 	EndingPeriod(BlockNumber, BlockNumber),
 	/// We have completed the bidding process and are waiting for the VRF to return some acceptable
-	/// randomness to select the winner. The number represents how many blocks we have been waiting.
+	/// randomness to select the winner. The number represents how many blocks we have been
+	/// waiting.
 	VrfDelay(BlockNumber),
 }
 
@@ -224,9 +227,9 @@ pub trait Auctioneer<BlockNumber> {
 	/// - `last_slot`: The last lease period index of the range to be bid on (inclusive).
 	/// - `amount`: The total amount to be the bid for deposit over the range.
 	///
-	/// The account `Bidder` must have at least `amount` available as a free balance in `Currency`. The
-	/// implementation *MUST* remove or reserve `amount` funds from `bidder` and those funds should be returned
-	/// or freed once the bid is rejected or lease has ended.
+	/// The account `Bidder` must have at least `amount` available as a free balance in `Currency`.
+	/// The implementation *MUST* remove or reserve `amount` funds from `bidder` and those funds
+	/// should be returned or freed once the bid is rejected or lease has ended.
 	fn place_bid(
 		bidder: Self::AccountId,
 		para: ParaId,
diff --git a/runtime/kusama/src/xcm_config.rs b/runtime/kusama/src/xcm_config.rs
index 59e32f2ca544..5725f54eddd5 100644
--- a/runtime/kusama/src/xcm_config.rs
+++ b/runtime/kusama/src/xcm_config.rs
@@ -63,8 +63,8 @@ parameter_types! {
 	pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local);
 }
 
-/// The canonical means of converting a `MultiLocation` into an `AccountId`, used when we want to determine
-/// the sovereign account controlled by a location.
+/// The canonical means of converting a `MultiLocation` into an `AccountId`, used when we want to
+/// determine the sovereign account controlled by a location.
 pub type SovereignAccountOf = (
 	// We can convert a child parachain using the standard `AccountId` conversion.
 	ChildParachainConvertsVia<ParaId, AccountId>,
@@ -72,8 +72,8 @@ pub type SovereignAccountOf = (
 	AccountId32Aliases<ThisNetwork, AccountId>,
 );
 
-/// Our asset transactor. This is what allows us to interest with the runtime facilities from the point of
-/// view of XCM-only concepts like `MultiLocation` and `MultiAsset`.
+/// Our asset transactor. This is what allows us to interact with the runtime facilities from the
+/// point of view of XCM-only concepts like `MultiLocation` and `MultiAsset`.
 ///
 /// Ours is only aware of the Balances pallet, which is mapped to `TokenLocation`.
 pub type LocalAssetTransactor = XcmCurrencyAdapter<
@@ -360,8 +360,8 @@ parameter_types! {
 	pub ReachableDest: Option<MultiLocation> = Some(Parachain(1000).into());
 }
 
-/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location
-/// of this chain.
+/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior
+/// location of this chain.
 pub type LocalOriginToLocation = (
 	// And a usual Signed origin to be used in XCM as a corresponding AccountId32
 	SignedToAccountId32<RuntimeOrigin, AccountId, ThisNetwork>,
@@ -374,8 +374,8 @@ pub type StakingAdminToPlurality =
 /// Type to convert the Fellows origin to a Plurality `MultiLocation` value.
 pub type FellowsToPlurality = OriginToPluralityVoice<RuntimeOrigin, Fellows, FellowsBodyId>;
 
-/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an interior location
-/// of this chain for a destination chain.
+/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an
+/// interior location of this chain for a destination chain.
 pub type LocalPalletOriginToLocation = (
 	// StakingAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value.
 	StakingAdminToPlurality,
@@ -386,16 +386,17 @@ pub type LocalPalletOriginToLocation = (
 impl pallet_xcm::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	// We only allow the root, the council, fellows and the staking admin to send messages.
-	// This is basically safe to enable for everyone (safe the possibility of someone spamming the parachain
-	// if they're willing to pay the KSM to send from the Relay-chain), but it's useless until we bring in XCM v3
-	// which will make `DescendOrigin` a bit more useful.
+	// This is basically safe to enable for everyone (save the possibility of someone spamming the
+	// parachain if they're willing to pay the KSM to send from the Relay-chain), but it's useless
+	// until we bring in XCM v3 which will make `DescendOrigin` a bit more useful.
 	type SendXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalPalletOriginToLocation>;
 	type XcmRouter = XcmRouter;
 	// Anyone can execute XCM messages locally.
 	type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>;
 	type XcmExecuteFilter = Everything;
 	type XcmExecutor = xcm_executor::XcmExecutor<XcmConfig>;
-	// Anyone is able to use teleportation regardless of who they are and what they want to teleport.
+	// Anyone is able to use teleportation regardless of who they are and what they want to
+	// teleport.
 	type XcmTeleportFilter = Everything;
 	// Anyone is able to use reserve transfers regardless of who they are and what they want to
 	// transfer.
@@ -450,7 +451,8 @@ fn karura_liquid_staking_xcm_has_sane_weight_upper_limt() {
 	else {
 		panic!("no Transact instruction found")
 	};
-	// should be pallet_utility.as_derivative { index: 0, call: pallet_staking::bond_extra { max_additional: 2490000000000 } }
+	// should be pallet_utility.as_derivative { index: 0, call: pallet_staking::bond_extra {
+	// max_additional: 2490000000000 } }
 	let message_call = call.take_decoded().expect("can't decode Transact call");
 	let call_weight = message_call.get_dispatch_info().weight;
 	// Ensure that the Transact instruction is giving a sensible `require_weight_at_most` value
diff --git a/runtime/parachains/src/builder.rs b/runtime/parachains/src/builder.rs
index e46c9f59b957..892e934e6dfc 100644
--- a/runtime/parachains/src/builder.rs
+++ b/runtime/parachains/src/builder.rs
@@ -174,7 +174,8 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 		configuration::Pallet::<T>::config().max_validators.unwrap_or(200)
 	}
 
-	/// Maximum number of validators participating in parachains consensus (a.k.a. active validators).
+	/// Maximum number of validators participating in parachains consensus (a.k.a. active
+	/// validators).
 	fn max_validators(&self) -> u32 {
 		self.max_validators.unwrap_or(Self::fallback_max_validators())
 	}
@@ -186,8 +187,8 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 		self
 	}
 
-	/// Maximum number of validators per core (a.k.a. max validators per group). This value is used if none is
-	/// explicitly set on the builder.
+	/// Maximum number of validators per core (a.k.a. max validators per group). This value is used
+	/// if none is explicitly set on the builder.
 	pub(crate) fn fallback_max_validators_per_core() -> u32 {
 		configuration::Pallet::<T>::config().max_validators_per_core.unwrap_or(5)
 	}
@@ -479,7 +480,8 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 	/// Create backed candidates for `cores_with_backed_candidates`. You need these cores to be
 	/// scheduled _within_ paras inherent, which requires marking the available bitfields as fully
 	/// available.
-	/// - `cores_with_backed_candidates` Mapping of `para_id`/`core_idx`/`group_idx` seed to number of
+	/// - `cores_with_backed_candidates` Mapping of `para_id`/`core_idx`/`group_idx` seed to number
+	///   of
 	/// validity votes.
 	fn create_backed_candidates(
 		&self,
@@ -687,9 +689,9 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 		);
 		assert_eq!(inclusion::PendingAvailability::<T>::iter().count(), used_cores as usize,);
 
-		// Mark all the used cores as occupied. We expect that their are `backed_and_concluding_cores`
-		// that are pending availability and that there are `used_cores - backed_and_concluding_cores `
-		// which are about to be disputed.
+		// Mark all the used cores as occupied. We expect that there are
+		// `backed_and_concluding_cores` that are pending availability and that there are
+		// `used_cores - backed_and_concluding_cores` which are about to be disputed.
 		scheduler::AvailabilityCores::<T>::set(vec![
 			Some(CoreOccupied::Parachain);
 			used_cores as usize
diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs
index 38a24211fb67..d4ad8619f16e 100644
--- a/runtime/parachains/src/configuration.rs
+++ b/runtime/parachains/src/configuration.rs
@@ -54,12 +54,12 @@ const LOG_TARGET: &str = "runtime::configuration";
 	serde::Deserialize,
 )]
 pub struct HostConfiguration<BlockNumber> {
-	// NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct requires
-	// special treatment.
+	// NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct
+	// requires special treatment.
 	//
-	// A parachain requested this struct can only depend on the subset of this struct. Specifically,
-	// only a first few fields can be depended upon. These fields cannot be changed without
-	// corresponding migration of the parachains.
+	// A parachain requested this struct can only depend on the subset of this struct.
+	// Specifically, only a first few fields can be depended upon. These fields cannot be changed
+	// without corresponding migration of the parachains.
 	/**
 	 * The parameters that are required for the parachains.
 	 */
@@ -88,9 +88,9 @@ pub struct HostConfiguration<BlockNumber> {
 	pub hrmp_max_message_num_per_candidate: u32,
 	/// The minimum period, in blocks, between which parachains can update their validation code.
 	///
-	/// This number is used to prevent parachains from spamming the relay chain with validation code
-	/// upgrades. The only thing it controls is the number of blocks the `UpgradeRestrictionSignal`
-	/// is set for the parachain in question.
+	/// This number is used to prevent parachains from spamming the relay chain with validation
+	/// code upgrades. The only thing it controls is the number of blocks the
+	/// `UpgradeRestrictionSignal` is set for the parachain in question.
 	///
 	/// If PVF pre-checking is enabled this should be greater than the maximum number of blocks
 	/// PVF pre-checking can take. Intuitively, this number should be greater than the duration
@@ -113,14 +113,15 @@ pub struct HostConfiguration<BlockNumber> {
 	/// been completed.
 	///
 	/// Note, there are situations in which `expected_at` in the past. For example, if
-	/// [`chain_availability_period`] or [`thread_availability_period`] is less than the delay set by
-	/// this field or if PVF pre-check took more time than the delay. In such cases, the upgrade is
-	/// further at the earliest possible time determined by [`minimum_validation_upgrade_delay`].
+	/// [`chain_availability_period`] or [`thread_availability_period`] is less than the delay set
+	/// by this field or if PVF pre-check took more time than the delay. In such cases, the upgrade
+	/// is further at the earliest possible time determined by
+	/// [`minimum_validation_upgrade_delay`].
 	///
 	/// The rationale for this delay has to do with relay-chain reversions. In case there is an
-	/// invalid candidate produced with the new version of the code, then the relay-chain can revert
-	/// [`validation_upgrade_delay`] many blocks back and still find the new code in the storage by
-	/// hash.
+	/// invalid candidate produced with the new version of the code, then the relay-chain can
+	/// revert [`validation_upgrade_delay`] many blocks back and still find the new code in the
+	/// storage by hash.
 	///
 	/// [#4601]: https://github.com/paritytech/polkadot/issues/4601
 	pub validation_upgrade_delay: BlockNumber,
@@ -179,13 +180,13 @@ pub struct HostConfiguration<BlockNumber> {
 	/// Must be non-zero.
 	pub group_rotation_frequency: BlockNumber,
 	/// The availability period, in blocks, for parachains. This is the amount of blocks
-	/// after inclusion that validators have to make the block available and signal its availability to
-	/// the chain.
+	/// after inclusion that validators have to make the block available and signal its
+	/// availability to the chain.
 	///
 	/// Must be at least 1.
 	pub chain_availability_period: BlockNumber,
-	/// The availability period, in blocks, for parathreads. Same as the `chain_availability_period`,
-	/// but a differing timeout due to differing requirements.
+	/// The availability period, in blocks, for parathreads. Same as the
+	/// `chain_availability_period`, but a differing timeout due to differing requirements.
 	///
 	/// Must be at least 1.
 	pub thread_availability_period: BlockNumber,
@@ -217,8 +218,8 @@ pub struct HostConfiguration<BlockNumber> {
 	pub needed_approvals: u32,
 	/// The number of samples to do of the `RelayVRFModulo` approval assignment criterion.
 	pub relay_vrf_modulo_samples: u32,
-	/// If an active PVF pre-checking vote observes this many number of sessions it gets automatically
-	/// rejected.
+	/// If an active PVF pre-checking vote observes this many number of sessions it gets
+	/// automatically rejected.
 	///
 	/// 0 means PVF pre-checking will be rejected on the first observed session unless the voting
 	/// gained supermajority before that the session change.
@@ -849,7 +850,8 @@ pub mod pallet {
 			})
 		}
 
-		/// Sets the maximum total size of items that can present in a upward dispatch queue at once.
+		/// Sets the maximum total size of items that can present in a upward dispatch queue at
+		/// once.
 		#[pallet::call_index(24)]
 		#[pallet::weight((
 			T::WeightInfo::set_config_with_u32(),
@@ -1257,8 +1259,8 @@ impl<T: Config> Pallet<T> {
 		// 3. pending_configs = [(cur+1, X)]
 		//    There is a pending configuration scheduled and it will be applied in the next session.
 		//
-		//    We will use X as the base configuration. We need to schedule a new configuration change
-		//    for the `scheduled_session` and use X as the base for the new configuration.
+		//    We will use X as the base configuration. We need to schedule a new configuration
+		//    change for the `scheduled_session` and use X as the base for the new configuration.
 		//
 		// 4. pending_configs = [(cur+1, X), (cur+2, Y)]
 		//    There is a pending configuration change in the next session and for the scheduled
diff --git a/runtime/parachains/src/configuration/migration/v7.rs b/runtime/parachains/src/configuration/migration/v7.rs
index cdff80a31a3a..78a7cf9e4dc0 100644
--- a/runtime/parachains/src/configuration/migration/v7.rs
+++ b/runtime/parachains/src/configuration/migration/v7.rs
@@ -182,10 +182,12 @@ mod tests {
 		// Steps:
 		// 1. Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate
 		// 2. Set these parameters:
-		//   2.1. selected state query: configuration; activeConfig(): PolkadotRuntimeParachainsConfigurationHostConfiguration
-		//   2.2. blockhash to query at: 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of the block)
-		//   2.3. Note the value of encoded storage key -> 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the referenced block.
-		//   2.4. You'll also need the decoded values to update the test.
+		//   2.1. selected state query: configuration; activeConfig():
+		//        PolkadotRuntimeParachainsConfigurationHostConfiguration
+		//   2.2. blockhash to query at: 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of the block)
+		//   2.3. Note the value of encoded storage key ->
+		//        0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the referenced block.
+		//   2.4. You'll also need the decoded values to update the test.
 		// 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage
 		//   3.1 Enter the encoded storage key and you get the raw config.
 
@@ -196,8 +198,8 @@ mod tests {
 		let v6 =
 			V6HostConfiguration::<primitives::BlockNumber>::decode(&mut &raw_config[..]).unwrap();
 
-		// We check only a sample of the values here. If we missed any fields or messed up data types
-		// that would skew all the fields coming after.
+		// We check only a sample of the values here. If we missed any fields or messed up data
+		// types that would skew all the fields coming after.
 		assert_eq!(v6.max_code_size, 3_145_728);
 		assert_eq!(v6.validation_upgrade_cooldown, 200);
 		assert_eq!(v6.max_pov_size, 5_242_880);
@@ -209,8 +211,8 @@ mod tests {
 
 	#[test]
 	fn test_migrate_to_v7() {
-		// Host configuration has lots of fields. However, in this migration we only remove one field.
-		// The most important part to check are a couple of the last fields. We also pick
+		// Host configuration has lots of fields. However, in this migration we only remove one
+		// field. The most important part to check are a couple of the last fields. We also pick
 		// extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and
 		// also their type.
 		//
@@ -291,7 +293,8 @@ mod tests {
 		});
 	}
 
-	// Test that migration doesn't panic in case there're no pending configurations upgrades in pallet's storage.
+	// Test that migration doesn't panic in case there're no pending configuration upgrades in
+	// pallet's storage.
 	#[test]
 	fn test_migrate_to_v7_no_pending() {
 		let v6 = V6HostConfiguration::<primitives::BlockNumber>::default();
diff --git a/runtime/parachains/src/disputes.rs b/runtime/parachains/src/disputes.rs
index 7b03cde8ed28..cf2e99e7359a 100644
--- a/runtime/parachains/src/disputes.rs
+++ b/runtime/parachains/src/disputes.rs
@@ -887,8 +887,8 @@ impl<T: Config> Pallet<T> {
 				#[allow(deprecated)]
 				<BackersOnDisputes<T>>::remove_prefix(to_prune, None);
 
-				// This is larger, and will be extracted to the `shared` pallet for more proper pruning.
-				// TODO: https://github.com/paritytech/polkadot/issues/3469
+				// This is larger, and will be extracted to the `shared` pallet for more proper
+				// pruning. TODO: https://github.com/paritytech/polkadot/issues/3469
 				#[allow(deprecated)]
 				<Included<T>>::remove_prefix(to_prune, None);
 			}
@@ -1178,7 +1178,8 @@ impl<T: Config> Pallet<T> {
 
 		<Disputes<T>>::insert(&session, &candidate_hash, &summary.state);
 
-		// Freeze if the INVALID votes against some local candidate are above the byzantine threshold
+		// Freeze if the INVALID votes against some local candidate are above the byzantine
+		// threshold
 		if summary.new_flags.contains(DisputeStateFlags::AGAINST_BYZANTINE) {
 			if let Some(revert_to) = <Included<T>>::get(&session, &candidate_hash) {
 				Self::revert_and_freeze(revert_to);
diff --git a/runtime/parachains/src/disputes/migration.rs b/runtime/parachains/src/disputes/migration.rs
index af216fa0408e..ccd367e41b36 100644
--- a/runtime/parachains/src/disputes/migration.rs
+++ b/runtime/parachains/src/disputes/migration.rs
@@ -79,14 +79,16 @@ pub mod v1 {
 		}
 	}
 
-	/// Migrates the pallet storage to the most recent version, checking and setting the `StorageVersion`.
+	/// Migrates the pallet storage to the most recent version, checking and setting the
+	/// `StorageVersion`.
 	pub fn migrate_to_v1<T: Config>() -> Weight {
 		let mut weight: Weight = Weight::zero();
 
 		// SpamSlots should not contain too many keys so removing everything at once should be safe
 		let res = SpamSlots::<T>::clear(u32::MAX, None);
 		// `loops` is the number of iterations => used to calculate read weights
-		// `backend` is the number of keys removed from the backend => used to calculate write weights
+		// `backend` is the number of keys removed from the backend => used to calculate write
+		// weights
 		weight = weight
 			.saturating_add(T::DbWeight::get().reads_writes(res.loops as u64, res.backend as u64));
 
diff --git a/runtime/parachains/src/disputes/tests.rs b/runtime/parachains/src/disputes/tests.rs
index 93dcd58264b2..acdba343274c 100644
--- a/runtime/parachains/src/disputes/tests.rs
+++ b/runtime/parachains/src/disputes/tests.rs
@@ -871,7 +871,8 @@ mod unconfirmed_disputes {
 	use assert_matches::assert_matches;
 	use sp_runtime::ModuleError;
 
-	// Shared initialization code between `test_unconfirmed_are_ignored` and `test_unconfirmed_disputes_cause_block_import_error`
+	// Shared initialization code between `test_unconfirmed_are_ignored` and
+	// `test_unconfirmed_disputes_cause_block_import_error`
 	fn generate_dispute_statement_set_and_run_to_block() -> DisputeStatementSet {
 		// 7 validators needed for byzantine threshold of 2.
 		let v0 = <ValidatorId as CryptoType>::Pair::generate().0;
@@ -2060,7 +2061,8 @@ fn deduplication_and_sorting_works() {
 		)
 		.unwrap_err();
 
-		// assert ordering of local only disputes, and at the same time, and being free of duplicates
+		// assert ordering of local only disputes, and at the same time, and being free of
+		// duplicates
 		assert_eq!(disputes_orig.len(), disputes.len() + 1);
 
 		let are_these_equal = |a: &DisputeStatementSet, b: &DisputeStatementSet| {
diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs
index c876749e853d..1be2fe57b1df 100644
--- a/runtime/parachains/src/hrmp.rs
+++ b/runtime/parachains/src/hrmp.rs
@@ -117,12 +117,12 @@ pub struct HrmpOpenChannelRequest {
 #[derive(Encode, Decode, TypeInfo)]
 #[cfg_attr(test, derive(Debug))]
 pub struct HrmpChannel {
-	// NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct requires
-	// special treatment.
+	// NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct
+	// requires special treatment.
 	//
-	// A parachain requested this struct can only depend on the subset of this struct. Specifically,
-	// only a first few fields can be depended upon (See `AbridgedHrmpChannel`). These fields cannot
-	// be changed without corresponding migration of parachains.
+	// A parachain requested this struct can only depend on the subset of this struct.
+	// Specifically, only a first few fields can be depended upon (See `AbridgedHrmpChannel`).
+	// These fields cannot be changed without corresponding migration of parachains.
 	/// The maximum number of messages that can be pending in the channel at once.
 	pub max_capacity: u32,
 	/// The maximum total size of the messages that can be pending in the channel at once.
@@ -370,7 +370,8 @@ pub mod pallet {
 
 	/// The HRMP watermark associated with each para.
 	/// Invariant:
-	/// - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a session.
+	/// - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a
+	///   session.
 	#[pallet::storage]
 	pub type HrmpWatermarks<T: Config> = StorageMap<_, Twox64Concat, ParaId, BlockNumberFor<T>>;
 
@@ -968,9 +969,9 @@ impl<T: Config> Pallet<T> {
 			out_hrmp_msgs.iter().enumerate().map(|(idx, out_msg)| (idx as u32, out_msg))
 		{
 			match last_recipient {
-				// the messages must be sorted in ascending order and there must be no two messages sent
-				// to the same recipient. Thus we can check that every recipient is strictly greater than
-				// the previous one.
+				// the messages must be sorted in ascending order and there must be no two messages
+				// sent to the same recipient. Thus we can check that every recipient is strictly
+				// greater than the previous one.
 				Some(last_recipient) if out_msg.recipient <= last_recipient =>
 					return Err(OutboundHrmpAcceptanceErr::NotSorted { idx }),
 				_ => last_recipient = Some(out_msg.recipient),
diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs
index c71657d1ac43..f4ef3b95065e 100644
--- a/runtime/parachains/src/inclusion/mod.rs
+++ b/runtime/parachains/src/inclusion/mod.rs
@@ -17,8 +17,8 @@
 //! The inclusion pallet is responsible for inclusion and availability of scheduled parachains
 //! and parathreads.
 //!
-//! It is responsible for carrying candidates from being backable to being backed, and then from backed
-//! to included.
+//! It is responsible for carrying candidates from being backable to being backed, and then from
+//! backed to included.
 
 use crate::{
 	configuration::{self, HostConfiguration},
@@ -76,8 +76,8 @@ impl WeightInfo for () {
 
 /// Maximum value that `config.max_upward_message_size` can be set to.
 ///
-/// This is used for benchmarking sanely bounding relevant storage items. It is expected from the `configuration`
-/// pallet to check these values before setting.
+/// This is used for benchmarking sanely bounding relevant storage items. It is expected from the
+/// `configuration` pallet to check these values before setting.
 pub const MAX_UPWARD_MESSAGE_SIZE_BOUND: u32 = 128 * 1024;
 
 /// A bitfield signed by a validator indicating that it is keeping its piece of the erasure-coding
@@ -354,8 +354,8 @@ pub mod pallet {
 		InvalidOutboundHrmp,
 		/// The validation code hash of the candidate is not valid.
 		InvalidValidationCodeHash,
-		/// The `para_head` hash in the candidate descriptor doesn't match the hash of the actual para head in the
-		/// commitments.
+		/// The `para_head` hash in the candidate descriptor doesn't match the hash of the actual
+		/// para head in the commitments.
 		ParaHeadMismatch,
 		/// A bitfield that references a freed core,
 		/// either intentionally or as part of a concluded
@@ -492,8 +492,8 @@ impl<T: Config> Pallet<T> {
 	///
 	/// Updates storage items `PendingAvailability` and `AvailabilityBitfields`.
 	///
-	/// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became available,
-	/// and cores free.
+	/// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became
+	/// available, and cores free.
 	pub(crate) fn update_pending_availability_and_get_freed_cores<F>(
 		expected_bits: usize,
 		validators: &[ValidatorId],
@@ -530,8 +530,8 @@ impl<T: Config> Pallet<T> {
 					continue
 				};
 
-				// defensive check - this is constructed by loading the availability bitfield record,
-				// which is always `Some` if the core is occupied - that's why we're here.
+				// defensive check - this is constructed by loading the availability bitfield
+				// record, which is always `Some` if the core is occupied - that's why we're here.
 				let validator_index = validator_index.0 as usize;
 				if let Some(mut bit) =
 					pending_availability.as_mut().and_then(|candidate_pending_availability| {
@@ -591,8 +591,8 @@ impl<T: Config> Pallet<T> {
 		freed_cores
 	}
 
-	/// Process candidates that have been backed. Provide the relay storage root, a set of candidates
-	/// and scheduled cores.
+	/// Process candidates that have been backed. Provide the relay storage root, a set of
+	/// candidates and scheduled cores.
 	///
 	/// Both should be sorted ascending by core index, and the candidates should be a subset of
 	/// scheduled cores. If these conditions are not met, the execution of the function fails.
@@ -968,7 +968,8 @@ impl<T: Config> Pallet<T> {
 				})
 			}
 			// make sure that the queue is not overfilled.
-			// we do it here only once since returning false invalidates the whole relay-chain block.
+			// we do it here only once since returning false invalidates the whole relay-chain
+			// block.
 			if para_queue_size.saturating_add(msg_size as u64) > config.max_upward_queue_size as u64
 			{
 				return Err(UmpAcceptanceCheckErr::TotalSizeExceeded {
diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs
index b9ecc3038ca2..e006c38e6dec 100644
--- a/runtime/parachains/src/initializer.rs
+++ b/runtime/parachains/src/initializer.rs
@@ -17,7 +17,8 @@
 //! This module is responsible for maintaining a consistent initialization order for all other
 //! parachains modules. It's also responsible for finalization and session change notifications.
 //!
-//! This module can throw fatal errors if session-change notifications are received after initialization.
+//! This module can throw fatal errors if session-change notifications are received after
+//! initialization.
 
 use crate::{
 	configuration::{self, HostConfiguration},
@@ -128,9 +129,9 @@ pub mod pallet {
 	/// Semantically a `bool`, but this guarantees it should never hit the trie,
 	/// as this is cleared in `on_finalize` and Frame optimizes `None` values to be empty values.
 	///
-	/// As a `bool`, `set(false)` and `remove()` both lead to the next `get()` being false, but one of
-	/// them writes to the trie and one does not. This confusion makes `Option<()>` more suitable for
-	/// the semantics of this variable.
+	/// As a `bool`, `set(false)` and `remove()` both lead to the next `get()` being false, but one
+	/// of them writes to the trie and one does not. This confusion makes `Option<()>` more suitable
+	/// for the semantics of this variable.
 	#[pallet::storage]
 	pub(super) type HasInitialized<T: Config> = StorageValue<_, ()>;
 
@@ -190,7 +191,8 @@ pub mod pallet {
 			// Apply buffered session changes as the last thing. This way the runtime APIs and the
 			// next block will observe the next session.
 			//
-			// Note that we only apply the last session as all others lasted less than a block (weirdly).
+			// Note that we only apply the last session as all others lasted less than a block
+			// (weirdly).
 			if let Some(BufferedSessionChange { session_index, validators, queued }) =
 				BufferedSessionChanges::<T>::take().pop()
 			{
diff --git a/runtime/parachains/src/origin.rs b/runtime/parachains/src/origin.rs
index 14f8c3786c96..c83fec1b8923 100644
--- a/runtime/parachains/src/origin.rs
+++ b/runtime/parachains/src/origin.rs
@@ -38,7 +38,6 @@ where
 /// belongs to.
 ///
 /// This module fulfills only the single purpose of housing the `Origin` in `construct_runtime`.
-///
 // ideally, though, the `construct_runtime` should support a free-standing origin.
 #[frame_support::pallet]
 pub mod pallet {
diff --git a/runtime/parachains/src/paras/mod.rs b/runtime/parachains/src/paras/mod.rs
index 98c5075a4c94..4570bb2b13bd 100644
--- a/runtime/parachains/src/paras/mod.rs
+++ b/runtime/parachains/src/paras/mod.rs
@@ -43,10 +43,10 @@
 //!
 //! The conditions that must be met before the para can use the new validation code are:
 //!
-//! 1. The validation code should have been "soaked" in the storage for a given number of blocks. That
-//!    is, the validation code should have been stored in on-chain storage for some time, so that in
-//!    case of a revert with a non-extreme height difference, that validation code can still be
-//!    found on-chain.
+//! 1. The validation code should have been "soaked" in the storage for a given number of blocks.
+//!    That is, the validation code should have been stored in on-chain storage for some time, so
+//!    that in case of a revert with a non-extreme height difference, that validation code can
+//!    still be found on-chain.
 //!
 //! 2. The validation code was vetted by the validators and declared as non-malicious in a processes
 //!    known as PVF pre-checking.
@@ -105,7 +105,6 @@
 //! start──────▶│reset│
 //!             └─────┘
 //! ```
-//!
 
 use crate::{
 	configuration,
@@ -152,8 +151,8 @@ pub struct ReplacementTimes<N> {
 	/// first parablock included with a relay-parent with number >= this value.
 	expected_at: N,
 	/// The relay-chain block number at which the parablock activating the code upgrade was
-	/// actually included. This means considered included and available, so this is the time at which
-	/// that parablock enters the acceptance period in this fork of the relay-chain.
+	/// actually included. This means considered included and available, so this is the time at
+	/// which that parablock enters the acceptance period in this fork of the relay-chain.
 	activated_at: N,
 }
 
@@ -332,7 +331,8 @@ impl<'de> Deserialize<'de> for ParaKind {
 	}
 }
 
-// Manual encoding, decoding, and TypeInfo as the parakind field in ParaGenesisArgs used to be a bool
+// Manual encoding, decoding, and TypeInfo as the parakind field in ParaGenesisArgs used to be a
+// bool
 impl Encode for ParaKind {
 	fn size_hint(&self) -> usize {
 		true.size_hint()
@@ -373,12 +373,15 @@ pub(crate) enum PvfCheckCause<BlockNumber> {
 	Onboarding(ParaId),
 	/// PVF vote was initiated by signalling of an upgrade by the given para.
 	Upgrade {
-		/// The ID of the parachain that initiated or is waiting for the conclusion of pre-checking.
+		/// The ID of the parachain that initiated or is waiting for the conclusion of
+		/// pre-checking.
 		id: ParaId,
-		/// The relay-chain block number of **inclusion** of candidate that that initiated the upgrade.
+		/// The relay-chain block number of **inclusion** of candidate that initiated the
+		/// upgrade.
 		///
-		/// It's important to count upgrade enactment delay from the inclusion of this candidate instead
-		/// of its relay parent -- in order to keep PVF available in case of chain reversions.
+		/// It's important to count upgrade enactment delay from the inclusion of this candidate
+		/// instead of its relay parent -- in order to keep PVF available in case of chain
+		/// reversions.
 		///
 		/// See https://github.com/paritytech/polkadot/issues/4601 for detailed explanation.
 		included_at: BlockNumber,
@@ -681,11 +684,11 @@ pub mod pallet {
 	pub(super) type PastCodeMeta<T: Config> =
 		StorageMap<_, Twox64Concat, ParaId, ParaPastCodeMeta<BlockNumberFor<T>>, ValueQuery>;
 
-	/// Which paras have past code that needs pruning and the relay-chain block at which the code was replaced.
-	/// Note that this is the actual height of the included block, not the expected height at which the
-	/// code upgrade would be applied, although they may be equal.
-	/// This is to ensure the entire acceptance period is covered, not an offset acceptance period starting
-	/// from the time at which the parachain perceives a code upgrade as having occurred.
+	/// Which paras have past code that needs pruning and the relay-chain block at which the code
+	/// was replaced. Note that this is the actual height of the included block, not the expected
+	/// height at which the code upgrade would be applied, although they may be equal.
+	/// This is to ensure the entire acceptance period is covered, not an offset acceptance period
+	/// starting from the time at which the parachain perceives a code upgrade as having occurred.
 	/// Multiple entries for a single para are permitted. Ordered ascending by block number.
 	#[pallet::storage]
 	pub(super) type PastCodePruning<T: Config> =
@@ -706,12 +709,13 @@ pub mod pallet {
 	pub(super) type FutureCodeHash<T: Config> =
 		StorageMap<_, Twox64Concat, ParaId, ValidationCodeHash>;
 
-	/// This is used by the relay-chain to communicate to a parachain a go-ahead with in the upgrade procedure.
+	/// This is used by the relay-chain to communicate to a parachain a go-ahead within the upgrade
+	/// procedure.
 	///
 	/// This value is absent when there are no upgrades scheduled or during the time the relay chain
-	/// performs the checks. It is set at the first relay-chain block when the corresponding parachain
-	/// can switch its upgrade function. As soon as the parachain's block is included, the value
-	/// gets reset to `None`.
+	/// performs the checks. It is set at the first relay-chain block when the corresponding
+	/// parachain can switch its upgrade function. As soon as the parachain's block is included, the
+	/// value gets reset to `None`.
 	///
 	/// NOTE that this field is used by parachains via merkle storage proofs, therefore changing
 	/// the format will require migration of parachains.
@@ -896,8 +900,9 @@ pub mod pallet {
 		/// Otherwise, the code will be added into the storage. Note that the code will be added
 		/// into storage with reference count 0. This is to account the fact that there are no users
 		/// for this code yet. The caller will have to make sure that this code eventually gets
-		/// used by some parachain or removed from the storage to avoid storage leaks. For the latter
-		/// prefer to use the `poke_unused_validation_code` dispatchable to raw storage manipulation.
+		/// used by some parachain or removed from the storage to avoid storage leaks. For the
+		/// latter prefer to use the `poke_unused_validation_code` dispatchable to raw storage
+		/// manipulation.
 		///
 		/// This function is mainly meant to be used for upgrading parachains that do not follow
 		/// the go-ahead signal while the PVF pre-checking feature is enabled.
@@ -1569,10 +1574,11 @@ impl<T: Config> Pallet<T> {
 
 			match cause {
 				PvfCheckCause::Onboarding(id) => {
-					// Here we need to undo everything that was done during `schedule_para_initialize`.
-					// Essentially, the logic is similar to offboarding, with exception that before
-					// actual onboarding the parachain did not have a chance to reach to upgrades.
-					// Therefore we can skip all the upgrade related storage items here.
+					// Here we need to undo everything that was done during
+					// `schedule_para_initialize`. Essentially, the logic is similar to offboarding,
+					// with exception that before actual onboarding the parachain did not have a
+					// chance to reach to upgrades. Therefore we can skip all the upgrade related
+					// storage items here.
 					weight += T::DbWeight::get().writes(3);
 					UpcomingParasGenesis::<T>::remove(&id);
 					CurrentCodeHash::<T>::remove(&id);
@@ -1629,8 +1635,8 @@ impl<T: Config> Pallet<T> {
 		//
 		// - Doing it within the context of the PR that introduces this change is undesirable, since
 		//   it is already a big change, and that change would require a migration. Moreover, if we
-		//   run the new version of the runtime, there will be less things to worry about during
-		//   the eventual proper migration.
+		//   run the new version of the runtime, there will be less things to worry about during the
+		//   eventual proper migration.
 		//
 		// - This data type already is used for generating genesis, and changing it will probably
 		//   introduce some unnecessary burden.
@@ -1641,8 +1647,8 @@ impl<T: Config> Pallet<T> {
 		//   get rid of hashing of the validation code when onboarding.
 		//
 		// - Replace `validation_code` with a sentinel value: an empty vector. This should be fine
-		//   as long we do not allow registering parachains with empty code. At the moment of writing
-		//   this should already be the case.
+		//   as long as we do not allow registering parachains with empty code. At the moment of
+		//   writing this should already be the case.
 		//
 		// - Empty value is treated as the current code is already inserted during the onboarding.
 		//
@@ -1670,7 +1676,8 @@ impl<T: Config> Pallet<T> {
 	///
 	/// Will return error if either is true:
 	///
-	/// - para is not a stable parachain or parathread (i.e. [`ParaLifecycle::is_stable`] is `false`)
+	/// - para is not a stable parachain or parathread (i.e. [`ParaLifecycle::is_stable`] is
+	///   `false`)
 	/// - para has a pending upgrade.
 	/// - para has unprocessed messages in its UMP queue.
 	///
@@ -1683,7 +1690,8 @@ impl<T: Config> Pallet<T> {
 		// ongoing PVF pre-checking votes. It also removes some nasty edge cases.
 		//
 		// However, an upcoming upgrade on its own imposes no restrictions. An upgrade is enacted
-		// with a new para head, so if a para never progresses we still should be able to offboard it.
+		// with a new para head, so if a para never progresses we still should be able to offboard
+		// it.
 		//
 		// This implicitly assumes that the given para exists, i.e. it's lifecycle != None.
 		if let Some(future_code_hash) = FutureCodeHash::<T>::get(&id) {
@@ -1768,13 +1776,14 @@ impl<T: Config> Pallet<T> {
 	/// the relay-chain block number will be determined at which the upgrade will take place. We
 	/// call that block `expected_at`.
 	///
-	/// Once the candidate with the relay-parent >= `expected_at` is enacted, the new validation code
-	/// will be applied. Therefore, the new code will be used to validate the next candidate.
+	/// Once the candidate with the relay-parent >= `expected_at` is enacted, the new validation
+	/// code will be applied. Therefore, the new code will be used to validate the next candidate.
 	///
 	/// The new code should not be equal to the current one, otherwise the upgrade will be aborted.
 	/// If there is already a scheduled code upgrade for the para, this is a no-op.
 	///
-	/// Inclusion block number specifies relay parent which enacted candidate initiating the upgrade.
+	/// Inclusion block number specifies relay parent which enacted candidate initiating the
+	/// upgrade.
 	pub(crate) fn schedule_code_upgrade(
 		id: ParaId,
 		new_code: ValidationCode,
@@ -1905,8 +1914,8 @@ impl<T: Config> Pallet<T> {
 		// We increase the code RC here in any case. Intuitively the parachain that requested this
 		// action is now a user of that PVF.
 		//
-		// If the result of the pre-checking is reject, then we would decrease the RC for each cause,
-		// including the current.
+		// If the result of the pre-checking is reject, then we would decrease the RC for each
+		// cause, including the current.
 		//
 		// If the result of the pre-checking is accept, then we do nothing to the RC because the PVF
 		// will continue be used by the same users.
@@ -1918,9 +1927,9 @@ impl<T: Config> Pallet<T> {
 		weight
 	}
 
-	/// Note that a para has progressed to a new head, where the new head was executed in the context
-	/// of a relay-chain block with given number. This will apply pending code upgrades based
-	/// on the relay-parent block number provided.
+	/// Note that a para has progressed to a new head, where the new head was executed in the
+	/// context of a relay-chain block with given number. This will apply pending code upgrades
+	/// based on the relay-parent block number provided.
 	pub(crate) fn note_new_head(
 		id: ParaId,
 		new_head: HeadData,
diff --git a/runtime/parachains/src/paras/tests.rs b/runtime/parachains/src/paras/tests.rs
index 2bf30bb273e5..4a3be6d7d50e 100644
--- a/runtime/parachains/src/paras/tests.rs
+++ b/runtime/parachains/src/paras/tests.rs
@@ -649,7 +649,8 @@ fn submit_code_change_when_not_allowed_is_err() {
 		Paras::schedule_code_upgrade(para_id, newer_code.clone(), 2, &Configuration::config());
 		assert_eq!(
 			FutureCodeUpgrades::<Test>::get(&para_id),
-			Some(1 + validation_upgrade_delay), // did not change since the same assertion from the last time.
+			Some(1 + validation_upgrade_delay), /* did not change since the same assertion from
+			                                     * the last time. */
 		);
 		assert_eq!(FutureCodeHash::<Test>::get(&para_id), Some(new_code.hash()));
 		check_code_is_not_stored(&newer_code);
@@ -1554,8 +1555,9 @@ fn increase_code_ref_doesnt_have_allergy_on_add_trusted_validation_code() {
 
 #[test]
 fn add_trusted_validation_code_insta_approval() {
-	// In particular, this tests that `kick_off_pvf_check` reacts to the `add_trusted_validation_code`
-	// and uses the `CodeByHash::contains_key` which is what `add_trusted_validation_code` uses.
+	// In particular, this tests that `kick_off_pvf_check` reacts to the
+	// `add_trusted_validation_code` and uses the `CodeByHash::contains_key` which is what
+	// `add_trusted_validation_code` uses.
 	let para_id = 100.into();
 	let validation_code = ValidationCode(vec![1, 2, 3]);
 	let validation_upgrade_delay = 25;
diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs
index 61be0d4adae8..a40a3422a669 100644
--- a/runtime/parachains/src/paras_inherent/mod.rs
+++ b/runtime/parachains/src/paras_inherent/mod.rs
@@ -285,8 +285,9 @@ pub mod pallet {
 }
 
 impl<T: Config> Pallet<T> {
-	/// Create the `ParachainsInherentData` that gets passed to [`Self::enter`] in [`Self::create_inherent`].
-	/// This code is pulled out of [`Self::create_inherent`] so it can be unit tested.
+	/// Create the `ParachainsInherentData` that gets passed to [`Self::enter`] in
+	/// [`Self::create_inherent`]. This code is pulled out of [`Self::create_inherent`] so it can be
+	/// unit tested.
 	fn create_inherent_inner(data: &InherentData) -> Option<ParachainsInherentData<HeaderFor<T>>> {
 		let parachains_inherent_data = match data.get_data(&Self::INHERENT_IDENTIFIER) {
 			Ok(Some(d)) => d,
@@ -313,11 +314,11 @@ impl<T: Config> Pallet<T> {
 	/// The given inherent data is processed and state is altered accordingly. If any data could
 	/// not be applied (inconsitencies, weight limit, ...) it is removed.
 	///
-	/// When called from `create_inherent` the `context` must be set to `ProcessInherentDataContext::ProvideInherent`
-	/// so it guarantees the invariant that inherent is not overweight.
-	///  
-	/// It is **mandatory** that calls from `enter` set `context` to `ProcessInherentDataContext::Enter` to ensure
-	/// the weight invariant is checked.
+	/// When called from `create_inherent` the `context` must be set to
+	/// `ProcessInherentDataContext::ProvideInherent` so it guarantees the invariant that inherent
+	/// is not overweight.
+	/// It is **mandatory** that calls from `enter` set `context` to
+	/// `ProcessInherentDataContext::Enter` to ensure the weight invariant is checked.
 	///
 	/// Returns: Result containing processed inherent data and weight, the processed inherent would
 	/// consume.
@@ -379,8 +380,8 @@ impl<T: Config> Pallet<T> {
 			let dispatch_class = DispatchClass::Mandatory;
 			let max_block_weight_full = <T as frame_system::Config>::BlockWeights::get();
 			log::debug!(target: LOG_TARGET, "Max block weight: {}", max_block_weight_full.max_block);
-			// Get max block weight for the mandatory class if defined, otherwise total max weight of
-			// the block.
+			// Get max block weight for the mandatory class if defined, otherwise total max weight
+			// of the block.
 			let max_weight = max_block_weight_full
 				.per_class
 				.get(dispatch_class)
@@ -412,7 +413,8 @@ impl<T: Config> Pallet<T> {
 			T::DisputesHandler::filter_dispute_data(set, post_conclusion_acceptance_period)
 		};
 
-		// Limit the disputes first, since the following statements depend on the votes include here.
+		// Limit the disputes first, since the following statements depend on the votes included
+		// here.
 		let (checked_disputes_sets, checked_disputes_sets_consumed_weight) =
 			limit_and_sanitize_disputes::<T, _>(
 				disputes,
@@ -449,8 +451,8 @@ impl<T: Config> Pallet<T> {
 			}
 			all_weight_after
 		} else {
-			// This check is performed in the context of block execution. Ensures inherent weight invariants guaranteed
-			// by `create_inherent_data` for block authorship.
+			// This check is performed in the context of block execution. Ensures inherent weight
+			// invariants guaranteed by `create_inherent_data` for block authorship.
 			if all_weight_before.any_gt(max_block_weight) {
 				log::error!(
 					"Overweight para inherent data reached the runtime {:?}: {} > {}",
@@ -714,13 +716,14 @@ fn random_sel<X, F: Fn(&X) -> Weight>(
 /// If there is sufficient space, all bitfields and all candidates
 /// will be included.
 ///
-/// Otherwise tries to include all disputes, and then tries to fill the remaining space with bitfields and then candidates.
+/// Otherwise tries to include all disputes, and then tries to fill the remaining space with
+/// bitfields and then candidates.
 ///
-/// The selection process is random. For candidates, there is an exception for code upgrades as they are preferred.
-/// And for disputes, local and older disputes are preferred (see `limit_and_sanitize_disputes`).
-/// for backed candidates, since with a increasing number of parachains their chances of
-/// inclusion become slim. All backed candidates  are checked beforehands in `fn create_inherent_inner`
-/// which guarantees sanity.
+/// The selection process is random. For candidates, there is an exception for code upgrades as they
+/// are preferred. And for disputes, local and older disputes are preferred (see
+/// `limit_and_sanitize_disputes`). For backed candidates, since with an increasing number of
+/// parachains their chances of inclusion become slim. All backed candidates are checked
+/// beforehand in `fn create_inherent_inner` which guarantees sanity.
 ///
 /// Assumes disputes are already filtered by the time this is called.
 ///
@@ -977,7 +980,8 @@ fn compute_entropy<T: Config>(parent_hash: T::Hash) -> [u8; 32] {
 ///   1. If weight is exceeded by locals, pick the older ones (lower indices)
 ///      until the weight limit is reached.
 ///
-/// Returns the consumed weight amount, that is guaranteed to be less than the provided `max_consumable_weight`.
+/// Returns the consumed weight amount, that is guaranteed to be less than the provided
+/// `max_consumable_weight`.
 fn limit_and_sanitize_disputes<
 	T: Config,
 	CheckValidityFn: FnMut(DisputeStatementSet) -> Option<CheckedDisputeStatementSet>,
diff --git a/runtime/parachains/src/paras_inherent/tests.rs b/runtime/parachains/src/paras_inherent/tests.rs
index c2e80e7525fb..faf52b555ba3 100644
--- a/runtime/parachains/src/paras_inherent/tests.rs
+++ b/runtime/parachains/src/paras_inherent/tests.rs
@@ -68,9 +68,9 @@ mod enter {
 	}
 
 	#[test]
-	// Validate that if we create 2 backed candidates which are assigned to 2 cores that will be freed via
-	// becoming fully available, the backed candidates will not be filtered out in `create_inherent` and
-	// will not cause `enter` to early.
+	// Validate that if we create 2 backed candidates which are assigned to 2 cores that will be
+	// freed via becoming fully available, the backed candidates will not be filtered out in
+	// `create_inherent` and will not cause `enter` to exit early.
 	fn include_backed_candidates() {
 		new_test_ext(MockGenesisConfig::default()).execute_with(|| {
 			let dispute_statements = BTreeMap::new();
@@ -252,7 +252,8 @@ mod enter {
 			let expected_para_inherent_data = scenario.data.clone();
 
 			// Check the para inherent data is as expected:
-			// * 1 bitfield per validator (5 validators per core, 3 disputes => 3 cores, 15 validators)
+			// * 1 bitfield per validator (5 validators per core, 3 disputes => 3 cores, 15
+			//   validators)
 			assert_eq!(expected_para_inherent_data.bitfields.len(), 15);
 			// * 0 backed candidate per core
 			assert_eq!(expected_para_inherent_data.backed_candidates.len(), 0);
@@ -389,7 +390,8 @@ mod enter {
 			let expected_para_inherent_data = scenario.data.clone();
 
 			// Check the para inherent data is as expected:
-			// * 1 bitfield per validator (4 validators per core, 2 backed candidates, 3 disputes => 4*5 = 20)
+			// * 1 bitfield per validator (4 validators per core, 2 backed candidates, 3 disputes =>
+			//   4*5 = 20)
 			assert_eq!(expected_para_inherent_data.bitfields.len(), 20);
 			// * 2 backed candidates
 			assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2);
@@ -408,7 +410,8 @@ mod enter {
 				Pallet::<Test>::create_inherent_inner(&inherent_data.clone()).unwrap();
 			assert!(limit_inherent_data != expected_para_inherent_data);
 
-			// Three disputes is over weight (see previous test), so we expect to only see 2 disputes
+			// Three disputes is over weight (see previous test), so we expect to only see 2
+			// disputes
 			assert_eq!(limit_inherent_data.disputes.len(), 2);
 			// Ensure disputes are filtered as expected
 			assert_eq!(limit_inherent_data.disputes[0].session, 1);
@@ -418,7 +421,8 @@ mod enter {
 				limit_inherent_data.bitfields.len(),
 				expected_para_inherent_data.bitfields.len()
 			);
-			// Ensure that all backed candidates are filtered out as either would make the block over weight
+			// Ensure that all backed candidates are filtered out as either would make the block
+			// over weight
 			assert_eq!(limit_inherent_data.backed_candidates.len(), 0);
 
 			assert_ok!(Pallet::<Test>::enter(
@@ -470,7 +474,8 @@ mod enter {
 			let expected_para_inherent_data = scenario.data.clone();
 
 			// Check the para inherent data is as expected:
-			// * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 4*5 = 20),
+			// * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes =>
+			//   4*5 = 20),
 			assert_eq!(expected_para_inherent_data.bitfields.len(), 25);
 			// * 2 backed candidates,
 			assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2);
@@ -493,14 +498,16 @@ mod enter {
 			assert!(inherent_data_weight(&limit_inherent_data)
 				.all_lte(max_block_weight_proof_size_adjusted()));
 
-			// Three disputes is over weight (see previous test), so we expect to only see 2 disputes
+			// Three disputes is over weight (see previous test), so we expect to only see 2
+			// disputes
 			assert_eq!(limit_inherent_data.disputes.len(), 2);
 			// Ensure disputes are filtered as expected
 			assert_eq!(limit_inherent_data.disputes[0].session, 1);
 			assert_eq!(limit_inherent_data.disputes[1].session, 2);
 			// Ensure all bitfields are included as these are still not over weight
 			assert_eq!(limit_inherent_data.bitfields.len(), 20,);
-			// Ensure that all backed candidates are filtered out as either would make the block over weight
+			// Ensure that all backed candidates are filtered out as either would make the block
+			// over weight
 			assert_eq!(limit_inherent_data.backed_candidates.len(), 0);
 
 			assert_ok!(Pallet::<Test>::enter(
@@ -551,7 +558,8 @@ mod enter {
 			let expected_para_inherent_data = scenario.data.clone();
 
 			// Check the para inherent data is as expected:
-			// * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25)
+			// * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes =>
+			//   5*5 = 25)
 			assert_eq!(expected_para_inherent_data.bitfields.len(), 25);
 			// * 2 backed candidates
 			assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2);
@@ -632,7 +640,8 @@ mod enter {
 				.any_lt(inherent_data_weight(&expected_para_inherent_data)));
 
 			// Check the para inherent data is as expected:
-			// * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25)
+			// * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes =>
+			//   5*5 = 25)
 			assert_eq!(expected_para_inherent_data.bitfields.len(), 25);
 			// * 2 backed candidates
 			assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2);
@@ -645,7 +654,8 @@ mod enter {
 
 			let limit_inherent_data =
 				Pallet::<Test>::create_inherent_inner(&inherent_data.clone()).unwrap();
-			// Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes
+			// Expect that inherent data is filtered to include only 1 backed candidate and 2
+			// disputes
 			assert!(limit_inherent_data != expected_para_inherent_data);
 			assert!(
 				max_block_weight_proof_size_adjusted()
@@ -727,7 +737,8 @@ mod enter {
 				.unwrap();
 			let limit_inherent_data =
 				Pallet::<Test>::create_inherent_inner(&inherent_data.clone()).unwrap();
-			// Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes
+			// Expect that inherent data is filtered to include only 1 backed candidate and 2
+			// disputes
 			assert!(limit_inherent_data != expected_para_inherent_data);
 			assert!(
 				max_block_weight_proof_size_adjusted()
@@ -792,7 +803,8 @@ mod enter {
 
 			let limit_inherent_data =
 				Pallet::<Test>::create_inherent_inner(&inherent_data.clone()).unwrap();
-			// Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes
+			// Expect that inherent data is filtered to include only 1 backed candidate and 2
+			// disputes
 			assert!(limit_inherent_data != expected_para_inherent_data);
 			assert!(
 				max_block_weight_proof_size_adjusted()
@@ -841,7 +853,8 @@ mod enter {
 				.any_lt(inherent_data_weight(&expected_para_inherent_data)));
 
 			// Check the para inherent data is as expected:
-			// * 1 bitfield per validator (5 validators per core, 2 backed candidates, 0 disputes => 2*5 = 10)
+			// * 1 bitfield per validator (5 validators per core, 2 backed candidates, 0 disputes =>
+			//   2*5 = 10)
 			assert_eq!(expected_para_inherent_data.bitfields.len(), 10);
 			// * 2 backed candidates
 			assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2);
@@ -854,7 +867,8 @@ mod enter {
 
 			let limit_inherent_data =
 				Pallet::<Test>::create_inherent_inner(&inherent_data.clone()).unwrap();
-			// Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes
+			// Expect that inherent data is filtered to include only 1 backed candidate and 2
+			// disputes
 			assert!(limit_inherent_data != expected_para_inherent_data);
 			assert!(
 				max_block_weight_proof_size_adjusted()
@@ -903,7 +917,8 @@ mod enter {
 				.any_lt(inherent_data_weight(&expected_para_inherent_data)));
 
 			// Check the para inherent data is as expected:
-			// * 1 bitfield per validator (5 validators per core, 30 backed candidates, 3 disputes => 5*33 = 165)
+			// * 1 bitfield per validator (5 validators per core, 30 backed candidates, 3 disputes
+			//   => 5*33 = 165)
 			assert_eq!(expected_para_inherent_data.bitfields.len(), 165);
 			// * 30 backed candidates
 			assert_eq!(expected_para_inherent_data.backed_candidates.len(), 30);
diff --git a/runtime/parachains/src/runtime_api_impl/v5.rs b/runtime/parachains/src/runtime_api_impl/v5.rs
index 1257c0c91702..4c9c8c911f62 100644
--- a/runtime/parachains/src/runtime_api_impl/v5.rs
+++ b/runtime/parachains/src/runtime_api_impl/v5.rs
@@ -393,7 +393,8 @@ pub fn pvfs_require_precheck<T: paras::Config>() -> Vec<ValidationCodeHash> {
 	<paras::Pallet<T>>::pvfs_require_precheck()
 }
 
-/// Returns the validation code hash for the given parachain making the given `OccupiedCoreAssumption`.
+/// Returns the validation code hash for the given parachain making the given
+/// `OccupiedCoreAssumption`.
 pub fn validation_code_hash<T>(
 	para_id: ParaId,
 	assumption: OccupiedCoreAssumption,
diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs
index b69c16ae8d01..6882834187dc 100644
--- a/runtime/parachains/src/scheduler.rs
+++ b/runtime/parachains/src/scheduler.rs
@@ -21,19 +21,20 @@
 //!   - Scheduling parachains and parathreads
 //!
 //! It aims to achieve these tasks with these goals in mind:
-//! - It should be possible to know at least a block ahead-of-time, ideally more,
-//!   which validators are going to be assigned to which parachains.
-//! - Parachains that have a candidate pending availability in this fork of the chain
-//!   should not be assigned.
+//! - It should be possible to know at least a block ahead-of-time, ideally more, which validators
+//!   are going to be assigned to which parachains.
+//! - Parachains that have a candidate pending availability in this fork of the chain should not be
+//!   assigned.
 //! - Validator assignments should not be gameable. Malicious cartels should not be able to
 //!   manipulate the scheduler to assign themselves as desired.
-//! - High or close to optimal throughput of parachains and parathreads. Work among validator groups should be balanced.
+//! - High or close to optimal throughput of parachains and parathreads. Work among validator groups
+//!   should be balanced.
 //!
 //! The Scheduler manages resource allocation using the concept of "Availability Cores".
 //! There will be one availability core for each parachain, and a fixed number of cores
 //! used for multiplexing parathreads. Validators will be partitioned into groups, with the same
-//! number of groups as availability cores. Validator groups will be assigned to different availability cores
-//! over time.
+//! number of groups as availability cores. Validator groups will be assigned to different
+//! availability cores over time.
 
 use frame_support::pallet_prelude::*;
 use frame_system::pallet_prelude::BlockNumberFor;
@@ -169,8 +170,9 @@ pub mod pallet {
 	/// broader set of Polkadot validators, but instead just the subset used for parachains during
 	/// this session.
 	///
-	/// Bound: The number of cores is the sum of the numbers of parachains and parathread multiplexers.
-	/// Reasonably, 100-1000. The dominant factor is the number of validators: safe upper bound at 10k.
+	/// Bound: The number of cores is the sum of the numbers of parachains and parathread
+	/// multiplexers. Reasonably, 100-1000. The dominant factor is the number of validators: safe
+	/// upper bound at 10k.
 	#[pallet::storage]
 	#[pallet::getter(fn validator_groups)]
 	pub(crate) type ValidatorGroups<T> = StorageValue<_, Vec<Vec<ValidatorIndex>>, ValueQuery>;
@@ -182,8 +184,8 @@ pub mod pallet {
 	#[pallet::storage]
 	pub(crate) type ParathreadQueue<T> = StorageValue<_, ParathreadClaimQueue, ValueQuery>;
 
-	/// One entry for each availability core. Entries are `None` if the core is not currently occupied. Can be
-	/// temporarily `Some` if scheduled but not occupied.
+	/// One entry for each availability core. Entries are `None` if the core is not currently
+	/// occupied. Can be temporarily `Some` if scheduled but not occupied.
 	/// The i'th parachain belongs to the i'th core, with the remaining cores all being
 	/// parathread-multiplexers.
 	///
@@ -197,11 +199,13 @@ pub mod pallet {
 	/// An index used to ensure that only one claim on a parathread exists in the queue or is
 	/// currently being handled by an occupied core.
 	///
-	/// Bounded by the number of parathread cores and scheduling lookahead. Reasonably, 10 * 50 = 500.
+	/// Bounded by the number of parathread cores and scheduling lookahead. Reasonably, 10 * 50 =
+	/// 500.
 	#[pallet::storage]
 	pub(crate) type ParathreadClaimIndex<T> = StorageValue<_, Vec<ParaId>, ValueQuery>;
 
-	/// The block number where the session start occurred. Used to track how many group rotations have occurred.
+	/// The block number where the session start occurred. Used to track how many group rotations
+	/// have occurred.
 	///
 	/// Note that in the context of parachains modules the session change is signaled during
 	/// the block and enacted at the end of the block (at the finalization stage, to be exact).
@@ -215,8 +219,8 @@ pub mod pallet {
 	///
 	/// Bounded by the number of cores: one for each parachain and parathread multiplexer.
 	///
-	/// The value contained here will not be valid after the end of a block. Runtime APIs should be used to determine scheduled cores/
-	/// for the upcoming block.
+	/// The value contained here will not be valid after the end of a block. Runtime APIs should be
+	/// used to determine scheduled cores for the upcoming block.
 	#[pallet::storage]
 	#[pallet::getter(fn scheduled)]
 	pub(crate) type Scheduled<T> = StorageValue<_, Vec<CoreAssignment>, ValueQuery>;
@@ -380,8 +384,9 @@ impl<T: Config> Pallet<T> {
 		})
 	}
 
-	/// Free unassigned cores. Provide a list of cores that should be considered newly-freed along with the reason
-	/// for them being freed. The list is assumed to be sorted in ascending order by core index.
+	/// Free unassigned cores. Provide a list of cores that should be considered newly-freed along
+	/// with the reason for them being freed. The list is assumed to be sorted in ascending order by
+	/// core index.
 	pub(crate) fn free_cores(just_freed_cores: impl IntoIterator<Item = (CoreIndex, FreedReason)>) {
 		let config = <configuration::Pallet<T>>::config();
 
@@ -403,8 +408,8 @@ impl<T: Config> Pallet<T> {
 									})
 								},
 								FreedReason::TimedOut => {
-									// If a parathread candidate times out, it's not the collator's fault,
-									// so we don't increment retries.
+									// If a parathread candidate times out, it's not the collator's
+									// fault, so we don't increment retries.
 									ParathreadQueue::<T>::mutate(|queue| {
 										queue.enqueue_entry(entry, config.parathread_cores);
 									})
@@ -417,9 +422,9 @@ impl<T: Config> Pallet<T> {
 		})
 	}
 
-	/// Schedule all unassigned cores, where possible. Provide a list of cores that should be considered
-	/// newly-freed along with the reason for them being freed. The list is assumed to be sorted in
-	/// ascending order by core index.
+	/// Schedule all unassigned cores, where possible. Provide a list of cores that should be
+	/// considered newly-freed along with the reason for them being freed. The list is assumed to be
+	/// sorted in ascending order by core index.
 	pub(crate) fn schedule(
 		just_freed_cores: impl IntoIterator<Item = (CoreIndex, FreedReason)>,
 		now: BlockNumberFor<T>,
@@ -455,10 +460,10 @@ impl<T: Config> Pallet<T> {
 
 					// check the first entry already scheduled with core index >= than the one we
 					// are looking at. 3 cases:
-					//  1. No such entry, clearly this core is not scheduled, so we need to schedule and put at the end.
-					//  2. Entry exists and has same index as the core we are inspecting. do not schedule again.
-					//  3. Entry exists and has higher index than the core we are inspecting. schedule and note
-					//     insertion position.
+					//  1. No such entry: this core is not scheduled; schedule it at the end.
+					//  2. Entry exists with the same index as this core: do not schedule again.
+					//  3. Entry exists with a higher index than this core: schedule and note the
+					//     insertion position.
 					prev_scheduled_in_order.peek().map_or(
 						Some(scheduled.len()),
 						|(idx_in_scheduled, assign)| {
@@ -509,8 +514,9 @@ impl<T: Config> Pallet<T> {
 				}
 			}
 
-			// at this point, because `Scheduled` is guaranteed to be sorted and we navigated unassigned
-			// core indices in ascending order, we can enact the updates prepared by the previous actions.
+			// at this point, because `Scheduled` is guaranteed to be sorted and we navigated
+			// unassigned core indices in ascending order, we can enact the updates prepared by the
+			// previous actions.
 			//
 			// while inserting, we have to account for the amount of insertions already done.
 			//
@@ -522,20 +528,20 @@ impl<T: Config> Pallet<T> {
 				scheduled.insert(insert_at, to_insert);
 			}
 
-			// scheduled is guaranteed to be sorted after this point because it was sorted before, and we
-			// applied sorted updates at their correct positions, accounting for the offsets of previous
-			// insertions.
+			// scheduled is guaranteed to be sorted after this point because it was sorted before,
+			// and we applied sorted updates at their correct positions, accounting for the offsets
+			// of previous insertions.
 		}
 
 		Scheduled::<T>::set(scheduled);
 		ParathreadQueue::<T>::set(parathread_queue);
 	}
 
-	/// Note that the given cores have become occupied. Behavior undefined if any of the given cores were not scheduled
-	/// or the slice is not sorted ascending by core index.
+	/// Note that the given cores have become occupied. Behavior undefined if any of the given cores
+	/// were not scheduled or the slice is not sorted ascending by core index.
 	///
-	/// Complexity: O(n) in the number of scheduled cores, which is capped at the number of total cores.
-	/// This is efficient in the case that most scheduled cores are occupied.
+	/// Complexity: O(n) in the number of scheduled cores, which is capped at the number of total
+	/// cores. This is efficient in the case that most scheduled cores are occupied.
 	pub(crate) fn occupied(now_occupied: &[CoreIndex]) {
 		if now_occupied.is_empty() {
 			return
@@ -568,8 +574,8 @@ impl<T: Config> Pallet<T> {
 		AvailabilityCores::<T>::set(availability_cores);
 	}
 
-	/// Get the para (chain or thread) ID assigned to a particular core or index, if any. Core indices
-	/// out of bounds will return `None`, as will indices of unassigned cores.
+	/// Get the para (chain or thread) ID assigned to a particular core or index, if any. Core
+	/// indices out of bounds will return `None`, as will indices of unassigned cores.
 	pub(crate) fn core_para(core_index: CoreIndex) -> Option<ParaId> {
 		let cores = AvailabilityCores::<T>::get();
 		match cores.get(core_index.0 as usize).and_then(|c| c.as_ref()) {
@@ -587,8 +593,9 @@ impl<T: Config> Pallet<T> {
 		ValidatorGroups::<T>::get().get(group_index.0 as usize).map(|g| g.clone())
 	}
 
-	/// Get the group assigned to a specific core by index at the current block number. Result undefined if the core index is unknown
-	/// or the block number is less than the session start index.
+	/// Get the group assigned to a specific core by index at the current block number. Result
+	/// undefined if the core index is unknown or the block number is less than the session start
+	/// index.
 	pub(crate) fn group_assigned_to_core(
 		core: CoreIndex,
 		at: BlockNumberFor<T>,
@@ -622,10 +629,11 @@ impl<T: Config> Pallet<T> {
 
 	/// Returns an optional predicate that should be used for timing out occupied cores.
 	///
-	/// If `None`, no timing-out should be done. The predicate accepts the index of the core, and the
-	/// block number since which it has been occupied, and the respective parachain and parathread
-	/// timeouts, i.e. only within `max(config.chain_availability_period, config.thread_availability_period)`
-	/// of the last rotation would this return `Some`, unless there are no rotations.
+	/// If `None`, no timing-out should be done. The predicate accepts the index of the core, and
+	/// the block number since which it has been occupied, and the respective parachain and
+	/// parathread timeouts, i.e. only within `max(config.chain_availability_period,
+	/// config.thread_availability_period)` of the last rotation would this return `Some`, unless
+	/// there are no rotations.
 	///
 	/// This really should not be a box, but is working around a compiler limitation filed here:
 	/// https://github.com/rust-lang/rust/issues/73226
diff --git a/runtime/parachains/src/scheduler/tests.rs b/runtime/parachains/src/scheduler/tests.rs
index 2188bb15b2e5..c4830f4bf253 100644
--- a/runtime/parachains/src/scheduler/tests.rs
+++ b/runtime/parachains/src/scheduler/tests.rs
@@ -56,7 +56,8 @@ fn run_to_block(
 
 		if let Some(notification) = new_session(b + 1) {
 			let mut notification_with_session_index = notification;
-			// We will make every session change trigger an action queue. Normally this may require 2 or more session changes.
+			// We will make every session change trigger an action queue. Normally this may require
+			// 2 or more session changes.
 			if notification_with_session_index.session_index == SessionIndex::default() {
 				notification_with_session_index.session_index = ParasShared::scheduled_session();
 			}
@@ -104,8 +105,9 @@ fn default_config() -> HostConfiguration<BlockNumber> {
 		scheduling_lookahead: 2,
 		parathread_retries: 1,
 		// This field does not affect anything that scheduler does. However, `HostConfiguration`
-		// is still a subject to consistency test. It requires that `minimum_validation_upgrade_delay`
-		// is greater than `chain_availability_period` and `thread_availability_period`.
+		// is still subject to the consistency test. It requires that
+		// `minimum_validation_upgrade_delay` is greater than `chain_availability_period` and
+		// `thread_availability_period`.
 		minimum_validation_upgrade_delay: 6,
 		..Default::default()
 	}
@@ -626,9 +628,9 @@ fn schedule_schedules_including_just_freed() {
 			assert!(Scheduler::scheduled().is_empty());
 		}
 
-		// add a couple more parathread claims - the claim on `b` will go to the 3rd parathread core (4)
-		// and the claim on `d` will go back to the 1st parathread core (2). The claim on `e` then
-		// will go for core `3`.
+		// add a couple more parathread claims - the claim on `b` will go to the 3rd parathread core
+		// (4) and the claim on `d` will go back to the 1st parathread core (2). The claim on `e`
+		// then will go for core `3`.
 		Scheduler::add_parathread_claim(ParathreadClaim(thread_b, collator.clone()));
 		Scheduler::add_parathread_claim(ParathreadClaim(thread_d, collator.clone()));
 		Scheduler::add_parathread_claim(ParathreadClaim(thread_e, collator.clone()));
diff --git a/runtime/parachains/src/shared.rs b/runtime/parachains/src/shared.rs
index 857e671f0ee4..6b50bcce4054 100644
--- a/runtime/parachains/src/shared.rs
+++ b/runtime/parachains/src/shared.rs
@@ -62,8 +62,8 @@ pub mod pallet {
 	pub(super) type ActiveValidatorIndices<T: Config> =
 		StorageValue<_, Vec<ValidatorIndex>, ValueQuery>;
 
-	/// The parachain attestation keys of the validators actively participating in parachain consensus.
-	/// This should be the same length as `ActiveValidatorIndices`.
+	/// The parachain attestation keys of the validators actively participating in parachain
+	/// consensus. This should be the same length as `ActiveValidatorIndices`.
 	#[pallet::storage]
 	#[pallet::getter(fn active_validator_keys)]
 	pub(super) type ActiveValidatorKeys<T: Config> = StorageValue<_, Vec<ValidatorId>, ValueQuery>;
diff --git a/runtime/parachains/src/util.rs b/runtime/parachains/src/util.rs
index d5b339b679e3..aa07ef080055 100644
--- a/runtime/parachains/src/util.rs
+++ b/runtime/parachains/src/util.rs
@@ -48,7 +48,7 @@ pub fn make_persisted_validation_data<T: paras::Config + hrmp::Config>(
 /// the order of the `active` vec, the second item will contain the rest, in the original order.
 ///
 /// ```ignore
-///		split_active_subset(active, all).0 == take_active_subset(active, all)
+/// split_active_subset(active, all).0 == take_active_subset(active, all)
 /// ```
 pub fn split_active_subset<T: Clone>(active: &[ValidatorIndex], all: &[T]) -> (Vec<T>, Vec<T>) {
 	let active_set: BTreeSet<_> = active.iter().cloned().collect();
@@ -76,7 +76,7 @@ pub fn split_active_subset<T: Clone>(active: &[ValidatorIndex], all: &[T]) -> (V
 /// Uses `split_active_subset` and concatenates the inactive to the active vec.
 ///
 /// ```ignore
-///		split_active_subset(active, all)[0..active.len()]) == take_active_subset(active, all)
+/// split_active_subset(active, all)[0..active.len()] == take_active_subset(active, all)
 /// ```
 pub fn take_active_subset_and_inactive<T: Clone>(active: &[ValidatorIndex], all: &[T]) -> Vec<T> {
 	let (mut a, mut i) = split_active_subset(active, all);
diff --git a/runtime/polkadot/src/governance/old.rs b/runtime/polkadot/src/governance/old.rs
index f4c2655a784a..4c7b503472f2 100644
--- a/runtime/polkadot/src/governance/old.rs
+++ b/runtime/polkadot/src/governance/old.rs
@@ -45,7 +45,8 @@ impl pallet_democracy::Config for Runtime {
 		pallet_collective::EnsureProportionAtLeast<AccountId, CouncilCollective, 1, 2>,
 		frame_system::EnsureRoot<AccountId>,
 	>;
-	/// A 60% super-majority can have the next scheduled referendum be a straight majority-carries vote.
+	/// A 60% super-majority can have the next scheduled referendum be a straight majority-carries
+	/// vote.
 	type ExternalMajorityOrigin = EitherOfDiverse<
 		pallet_collective::EnsureProportionAtLeast<AccountId, CouncilCollective, 3, 5>,
 		frame_system::EnsureRoot<AccountId>,
diff --git a/runtime/polkadot/src/xcm_config.rs b/runtime/polkadot/src/xcm_config.rs
index 867253ea0346..faae2e1d2619 100644
--- a/runtime/polkadot/src/xcm_config.rs
+++ b/runtime/polkadot/src/xcm_config.rs
@@ -63,8 +63,8 @@ parameter_types! {
 	pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local);
 }
 
-/// The canonical means of converting a `MultiLocation` into an `AccountId`, used when we want to determine
-/// the sovereign account controlled by a location.
+/// The canonical means of converting a `MultiLocation` into an `AccountId`, used when we want to
+/// determine the sovereign account controlled by a location.
 pub type SovereignAccountOf = (
 	// We can convert a child parachain using the standard `AccountId` conversion.
 	ChildParachainConvertsVia<ParaId, AccountId>,
@@ -72,8 +72,8 @@ pub type SovereignAccountOf = (
 	AccountId32Aliases<ThisNetwork, AccountId>,
 );
 
-/// Our asset transactor. This is what allows us to interact with the runtime assets from the point of
-/// view of XCM-only concepts like `MultiLocation` and `MultiAsset`.
+/// Our asset transactor. This is what allows us to interact with the runtime assets from the point
+/// of view of XCM-only concepts like `MultiLocation` and `MultiAsset`.
 ///
 /// Ours is only aware of the Balances pallet, which is mapped to `TokenLocation`.
 pub type LocalAssetTransactor = XcmCurrencyAdapter<
@@ -369,8 +369,8 @@ pub type CouncilToPlurality = BackingToPlurality<
 	CouncilBodyId,
 >;
 
-/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location
-/// of this chain.
+/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior
+/// location of this chain.
 pub type LocalOriginToLocation = (
 	CouncilToPlurality,
 	// And a usual Signed origin to be used in XCM as a corresponding AccountId32
@@ -385,11 +385,11 @@ pub type StakingAdminToPlurality =
 pub type FellowshipAdminToPlurality =
 	OriginToPluralityVoice<RuntimeOrigin, FellowshipAdmin, FellowshipAdminBodyId>;
 
-/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an interior location
-/// of this chain for a destination chain.
+/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an
+/// interior location of this chain for a destination chain.
 pub type LocalPalletOriginToLocation = (
-	// We allow an origin from the Collective pallet to be used in XCM as a corresponding Plurality of the
-	// `Unit` body.
+	// We allow an origin from the Collective pallet to be used in XCM as a corresponding Plurality
+	// of the `Unit` body.
 	CouncilToPlurality,
 	// StakingAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value.
 	StakingAdminToPlurality,
@@ -399,7 +399,8 @@ pub type LocalPalletOriginToLocation = (
 
 impl pallet_xcm::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
-	// We only allow the root, the council, the fellowship admin and the staking admin to send messages.
+	// We only allow the root, the council, the fellowship admin and the staking admin to send
+	// messages.
 	type SendXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalPalletOriginToLocation>;
 	type XcmRouter = XcmRouter;
 	// Anyone can execute XCM messages locally...
diff --git a/runtime/rococo/src/xcm_config.rs b/runtime/rococo/src/xcm_config.rs
index 714a4f69e759..75e06391c56b 100644
--- a/runtime/rococo/src/xcm_config.rs
+++ b/runtime/rococo/src/xcm_config.rs
@@ -56,8 +56,8 @@ parameter_types! {
 pub type LocationConverter =
 	(ChildParachainConvertsVia<ParaId, AccountId>, AccountId32Aliases<ThisNetwork, AccountId>);
 
-/// Our asset transactor. This is what allows us to interest with the runtime facilities from the point of
-/// view of XCM-only concepts like `MultiLocation` and `MultiAsset`.
+/// Our asset transactor. This is what allows us to interact with the runtime facilities from the
+/// point of view of XCM-only concepts like `MultiLocation` and `MultiAsset`.
 ///
 /// Ours is only aware of the Balances pallet, which is mapped to `RocLocation`.
 pub type LocalAssetTransactor = XcmCurrencyAdapter<
@@ -342,11 +342,11 @@ pub type CouncilToPlurality = BackingToPlurality<
 	CouncilBodyId,
 >;
 
-/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location
-/// of this chain.
+/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior
+/// location of this chain.
 pub type LocalOriginToLocation = (
-	// We allow an origin from the Collective pallet to be used in XCM as a corresponding Plurality of the
-	// `Unit` body.
+	// We allow an origin from the Collective pallet to be used in XCM as a corresponding Plurality
+	// of the `Unit` body.
 	CouncilToPlurality,
 	// And a usual Signed origin to be used in XCM as a corresponding AccountId32
 	SignedToAccountId32<RuntimeOrigin, AccountId, ThisNetwork>,
diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs
index c9f3aa6cb203..d7594e67c12a 100644
--- a/runtime/test-runtime/src/lib.rs
+++ b/runtime/test-runtime/src/lib.rs
@@ -355,8 +355,8 @@ impl pallet_staking::Config for Runtime {
 	type NextNewSession = Session;
 	type ElectionProvider = onchain::OnChainExecution<OnChainSeqPhragmen>;
 	type GenesisElectionProvider = onchain::OnChainExecution<OnChainSeqPhragmen>;
-	// Use the nominator map to iter voter AND no-ops for all SortedListProvider hooks. The migration
-	// to bags-list is a no-op, but the storage version will be updated.
+	// Use the nominator map to iter voter AND no-ops for all SortedListProvider hooks. The
+	// migration to bags-list is a no-op, but the storage version will be updated.
 	type VoterList = pallet_staking::UseNominatorsAndValidatorsMap<Runtime>;
 	type TargetList = pallet_staking::UseValidatorsMap<Runtime>;
 	type NominationsQuota = pallet_staking::FixedNominationsQuota<MAX_QUOTA_NOMINATIONS>;
diff --git a/runtime/test-runtime/src/xcm_config.rs b/runtime/test-runtime/src/xcm_config.rs
index 21ce8c877dc3..2113bbae66ad 100644
--- a/runtime/test-runtime/src/xcm_config.rs
+++ b/runtime/test-runtime/src/xcm_config.rs
@@ -38,8 +38,8 @@ parameter_types! {
 	pub const UniversalLocation: xcm::latest::InteriorMultiLocation = xcm::latest::Junctions::Here;
 }
 
-/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location
-/// of this chain.
+/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior
+/// location of this chain.
 pub type LocalOriginToLocation = (
 	// And a usual Signed origin to be used in XCM as a corresponding AccountId32
 	SignedToAccountId32<crate::RuntimeOrigin, crate::AccountId, AnyNetwork>,
diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs
index 4b4659442cff..9bb5a6db613d 100644
--- a/runtime/westend/src/lib.rs
+++ b/runtime/westend/src/lib.rs
@@ -338,8 +338,8 @@ pub struct MaybeSignedPhase;
 
 impl Get<u32> for MaybeSignedPhase {
 	fn get() -> u32 {
-		// 1 day = 4 eras -> 1 week = 28 eras. We want to disable signed phase once a week to test the fallback unsigned
-		// phase is able to compute elections on Westend.
+		// 1 day = 4 eras -> 1 week = 28 eras. We want to disable signed phase once a week to test
+		// the fallback unsigned phase is able to compute elections on Westend.
 		if Staking::current_era().unwrap_or(1) % 28 == 0 {
 			0
 		} else {
diff --git a/runtime/westend/src/xcm_config.rs b/runtime/westend/src/xcm_config.rs
index d6a3feb3bc0f..a83c38c9f66f 100644
--- a/runtime/westend/src/xcm_config.rs
+++ b/runtime/westend/src/xcm_config.rs
@@ -271,8 +271,8 @@ impl xcm_executor::Config for XcmConfig {
 	type Aliasers = Nothing;
 }
 
-/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location
-/// of this chain.
+/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior
+/// location of this chain.
 pub type LocalOriginToLocation = (
 	// And a usual Signed origin to be used in XCM as a corresponding AccountId32
 	SignedToAccountId32<RuntimeOrigin, AccountId, ThisNetwork>,
diff --git a/rustfmt.toml b/rustfmt.toml
index 542c561edd42..e2c4a037f37f 100644
--- a/rustfmt.toml
+++ b/rustfmt.toml
@@ -1,12 +1,20 @@
 # Basic
+edition = "2021"
 hard_tabs = true
 max_width = 100
 use_small_heuristics = "Max"
+
 # Imports
 imports_granularity = "Crate"
 reorder_imports = true
+
 # Consistency
 newline_style = "Unix"
+
+# Format comments
+comment_width = 100
+wrap_comments = true
+
 # Misc
 chain_width = 80
 spaces_around_ranges = false
@@ -18,7 +26,3 @@ match_block_trailing_comma = true
 trailing_comma = "Vertical"
 trailing_semicolon = false
 use_field_init_shorthand = true
-ignore = [
-    "bridges",
-]
-edition = "2021"
diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml
index b45c4c1be890..ea629f189dc8 100644
--- a/scripts/ci/gitlab/pipeline/test.yml
+++ b/scripts/ci/gitlab/pipeline/test.yml
@@ -114,4 +114,5 @@ cargo-clippy:
     - .docker-env
     - .test-refs
   script:
+    - cargo version && cargo clippy --version
     - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo clippy --locked --all-targets
diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs
index fcd261b438b3..9aa445becce0 100644
--- a/statement-table/src/generic.rs
+++ b/statement-table/src/generic.rs
@@ -96,8 +96,8 @@ pub enum ValidityDoubleVote<Candidate, Digest, Signature> {
 }
 
 impl<Candidate, Digest, Signature> ValidityDoubleVote<Candidate, Digest, Signature> {
-	/// Deconstruct this misbehavior into two `(Statement, Signature)` pairs, erasing the information
-	/// about precisely what the problem was.
+	/// Deconstruct this misbehavior into two `(Statement, Signature)` pairs, erasing the
+	/// information about precisely what the problem was.
 	pub fn deconstruct<Ctx>(
 		self,
 	) -> ((Statement<Candidate, Digest>, Signature), (Statement<Candidate, Digest>, Signature))
@@ -124,8 +124,8 @@ pub enum DoubleSign<Candidate, Digest, Signature> {
 }
 
 impl<Candidate, Digest, Signature> DoubleSign<Candidate, Digest, Signature> {
-	/// Deconstruct this misbehavior into a statement with two signatures, erasing the information about
-	/// precisely where in the process the issue was detected.
+	/// Deconstruct this misbehavior into a statement with two signatures, erasing the information
+	/// about precisely where in the process the issue was detected.
 	pub fn deconstruct(self) -> (Statement<Candidate, Digest>, Signature, Signature) {
 		match self {
 			Self::Seconded(candidate, a, b) => (Statement::Seconded(candidate), a, b),
@@ -555,10 +555,11 @@ impl<'a, Ctx: Context> Iterator for DrainMisbehaviors<'a, Ctx> {
 	type Item = (Ctx::AuthorityId, MisbehaviorFor<Ctx>);
 
 	fn next(&mut self) -> Option<Self::Item> {
-		// Note: this implementation will prematurely return `None` if `self.drain.next()` ever returns a
-		// tuple whose vector is empty. That will never currently happen, as the only modification
-		// to the backing map is currently via `drain` and `entry(...).or_default().push(...)`.
-		// However, future code changes might change that property.
+		// Note: this implementation will prematurely return `None` if `self.drain.next()` ever
+		// returns a tuple whose vector is empty. That will never currently happen, as the only
+		// modification to the backing map is currently via `drain` and
+		// `entry(...).or_default().push(...)`. However, future code changes might change that
+		// property.
 		self.maybe_item().or_else(|| {
 			self.in_progress = self.drain.next().map(Into::into);
 			self.maybe_item()
diff --git a/tests/common.rs b/tests/common.rs
index 39b92732498f..940a0c6f18d0 100644
--- a/tests/common.rs
+++ b/tests/common.rs
@@ -76,7 +76,8 @@ async fn wait_n_finalized_blocks_from(n: usize, url: &str) {
 /// This is hack to get the actual binded sockaddr because
 /// polkadot assigns a random port if the specified port was already binded.
 ///
-/// You must call `Command::new("cmd").stdout(process::Stdio::piped()).stderr(process::Stdio::piped())`
+/// You must call
+/// `Command::new("cmd").stdout(process::Stdio::piped()).stderr(process::Stdio::piped())`
 /// for this to work.
 pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) {
 	let mut data = String::new();
diff --git a/utils/staking-miner/src/opts.rs b/utils/staking-miner/src/opts.rs
index 819511b55b18..ecffe4531014 100644
--- a/utils/staking-miner/src/opts.rs
+++ b/utils/staking-miner/src/opts.rs
@@ -58,8 +58,8 @@ pub(crate) enum Command {
 #[derive(Debug, Clone, Parser)]
 #[cfg_attr(test, derive(PartialEq))]
 pub(crate) struct MonitorConfig {
-	/// The path to a file containing the seed of the account. If the file is not found, the seed is
-	/// used as-is.
+	/// The path to a file containing the seed of the account. If the file is not found, the seed
+	/// is used as-is.
 	///
 	/// Can also be provided via the `SEED` environment variable.
 	///
@@ -88,9 +88,11 @@ pub(crate) struct MonitorConfig {
 	///
 	/// `--submission-strategy always`: always submit.
 	///
-	/// `--submission-strategy "percent-better <percent>"`: submit if the submission is `n` percent better.
+	/// `--submission-strategy "percent-better <percent>"`: submit if the submission is `n` percent
+	/// better.
 	///
-	/// `--submission-strategy "no-worse-than  <percent>"`: submit if submission is no more than `n` percent worse.
+	/// `--submission-strategy "no-worse-than <percent>"`: submit if submission is no more than
+	/// `n` percent worse.
 	#[clap(long, default_value = "if-leading")]
 	pub submission_strategy: SubmissionStrategy,
 
@@ -100,8 +102,8 @@ pub(crate) struct MonitorConfig {
 	/// a delay can be enforced to avoid submitting at
 	/// "same time" and risk potential races with other miners.
 	///
-	/// When this is enabled and there are competing solutions, your solution might not be submitted
-	/// if the scores are equal.
+	/// When this is enabled and there are competing solutions, your solution might not be
+	/// submitted if the scores are equal.
 	#[arg(long, default_value_t = 0)]
 	pub delay: usize,
 }
@@ -109,8 +111,8 @@ pub(crate) struct MonitorConfig {
 #[derive(Debug, Clone, Parser)]
 #[cfg_attr(test, derive(PartialEq))]
 pub(crate) struct DryRunConfig {
-	/// The path to a file containing the seed of the account. If the file is not found, the seed is
-	/// used as-is.
+	/// The path to a file containing the seed of the account. If the file is not found, the seed
+	/// is used as-is.
 	///
 	/// Can also be provided via the `SEED` environment variable.
 	///
@@ -165,8 +167,8 @@ pub enum SubmissionStrategy {
 	IfLeading,
 	/// Submit if we are no worse than `Perbill` worse than the best.
 	ClaimNoWorseThan(Perbill),
-	/// Submit if we are leading, or if the solution that's leading is more that the given `Perbill`
-	/// better than us. This helps detect obviously fake solutions and still combat them.
+	/// Submit if we are leading, or if the solution that's leading is more than the given
+	/// `Perbill` better than us. This helps detect obviously fake solutions and still combat them.
 	ClaimBetterThan(Perbill),
 }
 
@@ -189,8 +191,8 @@ pub(crate) enum Solver {
 /// * --submission-strategy if-leading: only submit if leading
 /// * --submission-strategy always: always submit
 /// * --submission-strategy "percent-better <percent>": submit if submission is `n` percent better.
-/// * --submission-strategy "no-worse-than<percent>": submit if submission is no more than `n` percent worse.
-///
+/// * --submission-strategy "no-worse-than <percent>": submit if submission is no more than `n`
+///   percent worse.
 impl FromStr for SubmissionStrategy {
 	type Err = String;
 
diff --git a/utils/staking-miner/src/rpc.rs b/utils/staking-miner/src/rpc.rs
index a95e89191a49..2d25616e2a17 100644
--- a/utils/staking-miner/src/rpc.rs
+++ b/utils/staking-miner/src/rpc.rs
@@ -61,7 +61,8 @@ pub trait RpcApi {
 		at: Option<&Hash>,
 	) -> RpcResult<RuntimeDispatchInfo<Balance>>;
 
-	/// Dry run an extrinsic at a given block. Return SCALE encoded [`sp_runtime::ApplyExtrinsicResult`].
+	/// Dry run an extrinsic at a given block. Return SCALE encoded
+	/// [`sp_runtime::ApplyExtrinsicResult`].
 	#[method(name = "system_dryRun")]
 	async fn dry_run(&self, extrinsic: &Bytes, at: Option<Hash>) -> RpcResult<Bytes>;
 
diff --git a/xcm/pallet-xcm-benchmarks/src/generic/mod.rs b/xcm/pallet-xcm-benchmarks/src/generic/mod.rs
index e5fce008a0f2..195066ee5b48 100644
--- a/xcm/pallet-xcm-benchmarks/src/generic/mod.rs
+++ b/xcm/pallet-xcm-benchmarks/src/generic/mod.rs
@@ -52,7 +52,8 @@ pub mod pallet {
 		/// If set to `Err`, benchmarks which rely on an `exchange_asset` will be skipped.
 		fn worst_case_asset_exchange() -> Result<(MultiAssets, MultiAssets), BenchmarkError>;
 
-		/// A `(MultiLocation, Junction)` that is one of the `UniversalAliases` configured by the XCM executor.
+		/// A `(MultiLocation, Junction)` that is one of the `UniversalAliases` configured by the
+		/// XCM executor.
 		///
 		/// If set to `Err`, benchmarks which rely on a universal alias will be skipped.
 		fn universal_alias() -> Result<(MultiLocation, Junction), BenchmarkError>;
@@ -75,13 +76,15 @@ pub mod pallet {
 		/// Return an unlocker, owner and assets that can be locked and unlocked.
 		fn unlockable_asset() -> Result<(MultiLocation, MultiLocation, MultiAsset), BenchmarkError>;
 
-		/// A `(MultiLocation, NetworkId, InteriorMultiLocation)` we can successfully export message to.
+		/// A `(MultiLocation, NetworkId, InteriorMultiLocation)` we can successfully export message
+		/// to.
 		///
 		/// If set to `Err`, benchmarks which rely on `export_message` will be skipped.
 		fn export_message_origin_and_destination(
 		) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError>;
 
-		/// A `(MultiLocation, MultiLocation)` that is one of the `Aliasers` configured by the XCM executor.
+		/// A `(MultiLocation, MultiLocation)` that is one of the `Aliasers` configured by the XCM
+		/// executor.
 		///
 		/// If set to `Err`, benchmarks which rely on a universal alias will be skipped.
 		fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError>;
diff --git a/xcm/pallet-xcm/src/lib.rs b/xcm/pallet-xcm/src/lib.rs
index d52d5ba24271..aefcf30910ed 100644
--- a/xcm/pallet-xcm/src/lib.rs
+++ b/xcm/pallet-xcm/src/lib.rs
@@ -195,9 +195,9 @@ pub mod pallet {
 		/// The type used to actually dispatch an XCM to its destination.
 		type XcmRouter: SendXcm;
 
-		/// Required origin for executing XCM messages, including the teleport functionality. If successful,
-		/// then it resolves to `MultiLocation` which exists as an interior location within this chain's XCM
-		/// context.
+		/// Required origin for executing XCM messages, including the teleport functionality. If
+		/// successful, then it resolves to `MultiLocation` which exists as an interior location
+		/// within this chain's XCM context.
 		type ExecuteXcmOrigin: EnsureOrigin<
 			<Self as SysConfig>::RuntimeOrigin,
 			Success = MultiLocation,
@@ -212,7 +212,8 @@ pub mod pallet {
 		/// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass.
 		type XcmTeleportFilter: Contains<(MultiLocation, Vec<MultiAsset>)>;
 
-		/// Our XCM filter which messages to be reserve-transferred using the dedicated extrinsic must pass.
+		/// Our XCM filter which messages to be reserve-transferred using the dedicated extrinsic
+		/// must pass.
 		type XcmReserveTransferFilter: Contains<(MultiLocation, Vec<MultiAsset>)>;
 
 		/// Means of measuring the weight consumed by an XCM message locally.
@@ -290,8 +291,8 @@ pub mod pallet {
 		/// Query response has been received and query is removed. The registered notification has
 		/// been dispatched and executed successfully.
 		Notified { query_id: QueryId, pallet_index: u8, call_index: u8 },
-		/// Query response has been received and query is removed. The registered notification could
-		/// not be dispatched because the dispatch weight is greater than the maximum weight
+		/// Query response has been received and query is removed. The registered notification
+		/// could not be dispatched because the dispatch weight is greater than the maximum weight
 		/// originally budgeted by this runtime for the query result.
 		NotifyOverweight {
 			query_id: QueryId,
@@ -371,7 +372,8 @@ pub mod pallet {
 			cost: MultiAssets,
 			message_id: XcmHash,
 		},
-		/// We have requested that a remote chain stops sending us XCM version change notifications.
+		/// We have requested that a remote chain stops sending us XCM version change
+		/// notifications.
 		VersionNotifyUnrequested {
 			destination: MultiLocation,
 			cost: MultiAssets,
@@ -402,8 +404,8 @@ pub mod pallet {
 		/// The desired destination was unreachable, generally because there is a no way of routing
 		/// to it.
 		Unreachable,
-		/// There was some other issue (i.e. not to do with routing) in sending the message. Perhaps
-		/// a lack of space for buffering the message.
+		/// There was some other issue (i.e. not to do with routing) in sending the message.
+		/// Perhaps a lack of space for buffering the message.
 		SendFailure,
 		/// The message execution fails the filter.
 		Filtered,
@@ -791,12 +793,13 @@ pub mod pallet {
 		/// with all fees taken as needed from the asset.
 		///
 		/// - `origin`: Must be capable of withdrawing the `assets` and executing XCM.
-		/// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send
-		///   from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.
-		/// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be
-		///   an `AccountId32` value.
-		/// - `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the
-		///   `dest` side. May not be empty.
+		/// - `dest`: Destination context for the assets. Will typically be `X2(Parent,
+		///   Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send
+		///   from relay to parachain.
+		/// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will
+		///   generally be an `AccountId32` value.
+		/// - `assets`: The assets to be withdrawn. The first item should be the currency used to
+		///   pay the fee on the `dest` side. May not be empty.
 		/// - `fee_asset_item`: The index into `assets` of the item which should be used to pay
 		///   fees.
 		#[pallet::call_index(1)]
@@ -839,12 +842,13 @@ pub mod pallet {
 		/// with all fees taken as needed from the asset.
 		///
 		/// - `origin`: Must be capable of withdrawing the `assets` and executing XCM.
-		/// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send
-		///   from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.
-		/// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be
-		///   an `AccountId32` value.
-		/// - `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the
-		///   `dest` side.
+		/// - `dest`: Destination context for the assets. Will typically be `X2(Parent,
+		///   Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send
+		///   from relay to parachain.
+		/// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will
+		///   generally be an `AccountId32` value.
+		/// - `assets`: The assets to be withdrawn. This should include the assets used to pay the
+		///   fee on the `dest` side.
 		/// - `fee_asset_item`: The index into `assets` of the item which should be used to pay
 		///   fees.
 		#[pallet::call_index(2)]
@@ -885,12 +889,12 @@ pub mod pallet {
 		/// An event is deposited indicating whether `msg` could be executed completely or only
 		/// partially.
 		///
-		/// No more than `max_weight` will be used in its attempted execution. If this is less than the
-		/// maximum amount of weight that the message could take to be executed, then no execution
-		/// attempt will be made.
+		/// No more than `max_weight` will be used in its attempted execution. If this is less than
+		/// the maximum amount of weight that the message could take to be executed, then no
+		/// execution attempt will be made.
 		///
-		/// NOTE: A successful return to this does *not* imply that the `msg` was executed successfully
-		/// to completion; only that *some* of it was executed.
+		/// NOTE: A successful return to this does *not* imply that the `msg` was executed
+		/// successfully to completion; only that *some* of it was executed.
 		#[pallet::call_index(3)]
 		#[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))]
 		pub fn execute(
@@ -1012,12 +1016,13 @@ pub mod pallet {
 		/// at risk.
 		///
 		/// - `origin`: Must be capable of withdrawing the `assets` and executing XCM.
-		/// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send
-		///   from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.
-		/// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be
-		///   an `AccountId32` value.
-		/// - `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the
-		///   `dest` side.
+		/// - `dest`: Destination context for the assets. Will typically be `X2(Parent,
+		///   Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send
+		///   from relay to parachain.
+		/// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will
+		///   generally be an `AccountId32` value.
+		/// - `assets`: The assets to be withdrawn. This should include the assets used to pay the
+		///   fee on the `dest` side.
 		/// - `fee_asset_item`: The index into `assets` of the item which should be used to pay
 		///   fees.
 		/// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase.
@@ -1063,12 +1068,13 @@ pub mod pallet {
 		/// at risk.
 		///
 		/// - `origin`: Must be capable of withdrawing the `assets` and executing XCM.
-		/// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send
-		///   from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.
-		/// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be
-		///   an `AccountId32` value.
-		/// - `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the
-		///   `dest` side. May not be empty.
+		/// - `dest`: Destination context for the assets. Will typically be `X2(Parent,
+		///   Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send
+		///   from relay to parachain.
+		/// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will
+		///   generally be an `AccountId32` value.
+		/// - `assets`: The assets to be withdrawn. The first item should be the currency used to
+		///   pay the fee on the `dest` side. May not be empty.
 		/// - `fee_asset_item`: The index into `assets` of the item which should be used to pay
 		///   fees.
 		/// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase.
@@ -1561,13 +1567,13 @@ impl<T: Config> Pallet<T> {
 	///
 	/// - `message`: The message whose outcome should be reported.
 	/// - `responder`: The origin from which a response should be expected.
-	/// - `notify`: A dispatchable function which will be called once the outcome of `message`
-	///   is known. It may be a dispatchable in any pallet of the local chain, but other than
-	///   the usual origin, it must accept exactly two arguments: `query_id: QueryId` and
-	///   `outcome: Response`, and in that order. It should expect that the origin is
-	///   `Origin::Response` and will contain the responder's location.
-	/// - `timeout`: The block number after which it is permissible for `notify` not to be
-	///   called even if a response is received.
+	/// - `notify`: A dispatchable function which will be called once the outcome of `message` is
+	///   known. It may be a dispatchable in any pallet of the local chain, but other than the usual
+	///   origin, it must accept exactly two arguments: `query_id: QueryId` and `outcome: Response`,
+	///   and in that order. It should expect that the origin is `Origin::Response` and will contain
+	///   the responder's location.
+	/// - `timeout`: The block number after which it is permissible for `notify` not to be called
+	///   even if a response is received.
 	///
 	/// `report_outcome_notify` may return an error if the `responder` is not invertible.
 	///
@@ -2090,8 +2096,8 @@ impl<T: Config> OnResponse for Pallet<T> {
 										call_index,
 									};
 									Self::deposit_event(e);
-									// Not much to do with the result as it is. It's up to the parachain to ensure that the
-									// message makes sense.
+									// Not much to do with the result as it is. It's up to the
+									// parachain to ensure that the message makes sense.
 									error_and_info.post_info.actual_weight
 								},
 							}
@@ -2159,8 +2165,8 @@ where
 	}
 }
 
-/// Filter for `MultiLocation` to find those which represent a strict majority approval of an identified
-/// plurality.
+/// Filter for `MultiLocation` to find those which represent a strict majority approval of an
+/// identified plurality.
 ///
 /// May reasonably be used with `EnsureXcm`.
 pub struct IsMajorityOfBody<Prefix, Body>(PhantomData<(Prefix, Body)>);
@@ -2186,8 +2192,8 @@ impl<Prefix: Get<MultiLocation>, Body: Get<BodyId>> Contains<MultiLocation>
 	}
 }
 
-/// `EnsureOrigin` implementation succeeding with a `MultiLocation` value to recognize and filter the
-/// `Origin::Xcm` item.
+/// `EnsureOrigin` implementation succeeding with a `MultiLocation` value to recognize and filter
+/// the `Origin::Xcm` item.
 pub struct EnsureXcm<F>(PhantomData<F>);
 impl<O: OriginTrait + From<Origin>, F: Contains<MultiLocation>> EnsureOrigin<O> for EnsureXcm<F>
 where
diff --git a/xcm/src/double_encoded.rs b/xcm/src/double_encoded.rs
index 2c8957d9ed76..c4c1276fad8d 100644
--- a/xcm/src/double_encoded.rs
+++ b/xcm/src/double_encoded.rs
@@ -73,7 +73,8 @@ impl<T> DoubleEncoded<T> {
 
 impl<T: Decode> DoubleEncoded<T> {
 	/// Decode the inner encoded value and store it.
-	/// Returns a reference to the value in case of success and `Err(())` in case the decoding fails.
+	/// Returns a reference to the value in case of success and `Err(())` in case the decoding
+	/// fails.
 	pub fn ensure_decoded(&mut self) -> Result<&T, ()> {
 		if self.decoded.is_none() {
 			self.decoded =
@@ -92,8 +93,9 @@ impl<T: Decode> DoubleEncoded<T> {
 			.ok_or(())
 	}
 
-	/// Provides an API similar to `TryInto` that allows fallible conversion to the inner value type.
-	/// `TryInto` implementation would collide with std blanket implementation based on `TryFrom`.
+	/// Provides an API similar to `TryInto` that allows fallible conversion to the inner value
+	/// type. `TryInto` implementation would collide with std blanket implementation based on
+	/// `TryFrom`.
 	pub fn try_into(mut self) -> Result<T, ()> {
 		self.ensure_decoded()?;
 		self.decoded.ok_or(())
diff --git a/xcm/src/lib.rs b/xcm/src/lib.rs
index 2e8ea78b5c15..a012c5f53fbf 100644
--- a/xcm/src/lib.rs
+++ b/xcm/src/lib.rs
@@ -360,7 +360,8 @@ impl<Call> TryFrom<VersionedXcm<Call>> for v3::Xcm<Call> {
 	}
 }
 
-/// Convert an `Xcm` datum into a `VersionedXcm`, based on a destination `MultiLocation` which will interpret it.
+/// Convert an `Xcm` datum into a `VersionedXcm`, based on a destination `MultiLocation` which will
+/// interpret it.
 pub trait WrapVersion {
 	fn wrap_version<RuntimeCall>(
 		dest: &latest::MultiLocation,
@@ -368,7 +369,8 @@ pub trait WrapVersion {
 	) -> Result<VersionedXcm<RuntimeCall>, ()>;
 }
 
-/// `()` implementation does nothing with the XCM, just sending with whatever version it was authored as.
+/// `()` implementation does nothing with the XCM, just sending with whatever version it was
+/// authored as.
 impl WrapVersion for () {
 	fn wrap_version<RuntimeCall>(
 		_: &latest::MultiLocation,
@@ -378,7 +380,8 @@ impl WrapVersion for () {
 	}
 }
 
-/// `WrapVersion` implementation which attempts to always convert the XCM to version 2 before wrapping it.
+/// `WrapVersion` implementation which attempts to always convert the XCM to version 2 before
+/// wrapping it.
 pub struct AlwaysV2;
 impl WrapVersion for AlwaysV2 {
 	fn wrap_version<RuntimeCall>(
@@ -389,7 +392,8 @@ impl WrapVersion for AlwaysV2 {
 	}
 }
 
-/// `WrapVersion` implementation which attempts to always convert the XCM to version 3 before wrapping it.
+/// `WrapVersion` implementation which attempts to always convert the XCM to version 3 before
+/// wrapping it.
 pub struct AlwaysV3;
 impl WrapVersion for AlwaysV3 {
 	fn wrap_version<Call>(
diff --git a/xcm/src/v2/junction.rs b/xcm/src/v2/junction.rs
index be075a31fe32..73a502999462 100644
--- a/xcm/src/v2/junction.rs
+++ b/xcm/src/v2/junction.rs
@@ -32,13 +32,13 @@ pub enum Junction {
 	///
 	/// Generally used when the context is a Polkadot Relay-chain.
 	Parachain(#[codec(compact)] u32),
-	/// A 32-byte identifier for an account of a specific network that is respected as a sovereign endpoint within
-	/// the context.
+	/// A 32-byte identifier for an account of a specific network that is respected as a sovereign
+	/// endpoint within the context.
 	///
 	/// Generally used when the context is a Substrate-based chain.
 	AccountId32 { network: NetworkId, id: [u8; 32] },
-	/// An 8-byte index for an account of a specific network that is respected as a sovereign endpoint within
-	/// the context.
+	/// An 8-byte index for an account of a specific network that is respected as a sovereign
+	/// endpoint within the context.
 	///
 	/// May be used when the context is a Frame-based chain and includes e.g. an indices pallet.
 	AccountIndex64 {
@@ -46,8 +46,8 @@ pub enum Junction {
 		#[codec(compact)]
 		index: u64,
 	},
-	/// A 20-byte identifier for an account of a specific network that is respected as a sovereign endpoint within
-	/// the context.
+	/// A 20-byte identifier for an account of a specific network that is respected as a sovereign
+	/// endpoint within the context.
 	///
 	/// May be used when the context is an Ethereum or Bitcoin chain or smart-contract.
 	AccountKey20 { network: NetworkId, key: [u8; 20] },
@@ -73,8 +73,8 @@ pub enum Junction {
 	OnlyChild,
 	/// A pluralistic body existing within consensus.
 	///
-	/// Typical to be used to represent a governance origin of a chain, but could in principle be used to represent
-	/// things such as multisigs also.
+	/// Typically used to represent a governance origin of a chain, but could in principle be
+	/// used to represent things such as multisigs also.
 	Plurality { id: BodyId, part: BodyPart },
 }
 
diff --git a/xcm/src/v2/mod.rs b/xcm/src/v2/mod.rs
index 014942d6b679..79cc8ead89a1 100644
--- a/xcm/src/v2/mod.rs
+++ b/xcm/src/v2/mod.rs
@@ -39,11 +39,10 @@
 //! - `Order` is now obsolete and replaced entirely by `Instruction`.
 //! - `Xcm` is now a simple wrapper around a `Vec<Instruction>`.
 //! - During conversion from `Order` to `Instruction`, we do not handle `BuyExecution`s that have
-//!   nested XCMs, i.e. if the `instructions` field in the `BuyExecution` enum struct variant is
-//!   not empty, then the conversion will fail. To address this, rewrite the XCM using
-//!   `Instruction`s in chronological order.
-//! - During conversion from `Xcm` to `Instruction`, we do not handle `RelayedFrom` messages at
-//!   all.
+//!   nested XCMs, i.e. if the `instructions` field in the `BuyExecution` enum struct variant is not
+//!   empty, then the conversion will fail. To address this, rewrite the XCM using `Instruction`s in
+//!   chronological order.
+//! - During conversion from `Xcm` to `Instruction`, we do not handle `RelayedFrom` messages at all.
 //!
 //! ### XCM Pallet
 //! - The `Weigher` configuration item must have sensible weights defined for `BuyExecution` and
@@ -153,20 +152,20 @@ pub enum BodyId {
 	Executive,
 	/// The unambiguous technical body (for Polkadot, this would be the Technical Committee).
 	Technical,
-	/// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a majority of
-	/// lock-voters).
+	/// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a
+	/// majority of lock-voters).
 	Legislative,
-	/// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a "grand oracle", it
-	/// may be considered as that).
+	/// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a
+	/// "grand oracle", it may be considered as that).
 	Judicial,
-	/// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public referendum
-	/// on the `staking_admin` track).
+	/// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public
+	/// referendum on the `staking_admin` track).
 	Defense,
-	/// The unambiguous administration body (for Polkadot, an opinion on the topic given via a public referendum
-	/// on the `general_admin` track).
+	/// The unambiguous administration body (for Polkadot, an opinion on the topic given via a
+	/// public referendum on the `general_admin` track).
 	Administration,
-	/// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public referendum
-	/// on the `treasurer` track).
+	/// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public
+	/// referendum on the `treasurer` track).
 	Treasury,
 }
 
@@ -422,8 +421,8 @@ pub type Weight = u64;
 ///
 /// All messages are delivered from a known *origin*, expressed as a `MultiLocation`.
 ///
-/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the outer
-/// XCM format, known as `VersionedXcm`.
+/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the
+/// outer XCM format, known as `VersionedXcm`.
 #[derive(Derivative, Encode, Decode, TypeInfo, xcm_procedural::XcmWeightInfoTrait)]
 #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))]
 #[codec(encode_bound())]
@@ -508,8 +507,8 @@ pub enum Instruction<RuntimeCall> {
 	/// - `dest`: The location whose sovereign account will own the assets and thus the effective
 	///   beneficiary for the assets and the notification target for the reserve asset deposit
 	///   message.
-	/// - `xcm`: The instructions that should follow the `ReserveAssetDeposited`
-	///   instruction, which is sent onwards to `dest`.
+	/// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` instruction, which
+	///   is sent onwards to `dest`.
 	///
 	/// Safety: No concerns.
 	///
@@ -538,10 +537,11 @@ pub enum Instruction<RuntimeCall> {
 		call: DoubleEncoded<RuntimeCall>,
 	},
 
-	/// A message to notify about a new incoming HRMP channel. This message is meant to be sent by the
-	/// relay-chain to a para.
+	/// A message to notify about a new incoming HRMP channel. This message is meant to be sent by
+	/// the relay-chain to a para.
 	///
-	/// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel opening.
+	/// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel
+	///   opening.
 	/// - `max_message_size`: The maximum size of a message proposed by the sender.
 	/// - `max_capacity`: The maximum number of messages that can be queued in the channel.
 	///
@@ -558,8 +558,8 @@ pub enum Instruction<RuntimeCall> {
 	},
 
 	/// A message to notify about that a previously sent open channel request has been accepted by
-	/// the recipient. That means that the channel will be opened during the next relay-chain session
-	/// change. This message is meant to be sent by the relay-chain to a para.
+	/// the recipient. That means that the channel will be opened during the next relay-chain
+	/// session change. This message is meant to be sent by the relay-chain to a para.
 	///
 	/// Safety: The message should originate directly from the relay-chain.
 	///
@@ -573,10 +573,10 @@ pub enum Instruction<RuntimeCall> {
 		recipient: u32,
 	},
 
-	/// A message to notify that the other party in an open channel decided to close it. In particular,
-	/// `initiator` is going to close the channel opened from `sender` to the `recipient`. The close
-	/// will be enacted at the next relay-chain session change. This message is meant to be sent by
-	/// the relay-chain to a para.
+	/// A message to notify that the other party in an open channel decided to close it. In
+	/// particular, `initiator` is going to close the channel opened from `sender` to the
+	/// `recipient`. The close will be enacted at the next relay-chain session change. This message
+	/// is meant to be sent by the relay-chain to a para.
 	///
 	/// Safety: The message should originate directly from the relay-chain.
 	///
@@ -639,8 +639,8 @@ pub enum Instruction<RuntimeCall> {
 	///
 	/// - `assets`: The asset(s) to remove from holding.
 	/// - `max_assets`: The maximum number of unique assets/asset instances to remove from holding.
-	///   Only the first `max_assets` assets/instances of those matched by `assets` will be removed,
-	///   prioritized under standard asset ordering. Any others will remain in holding.
+	///   Only the first `max_assets` assets/instances of those matched by `assets` will be
+	///   removed, prioritized under standard asset ordering. Any others will remain in holding.
 	/// - `beneficiary`: The new owner for the assets.
 	///
 	/// Kind: *Instruction*
@@ -661,13 +661,13 @@ pub enum Instruction<RuntimeCall> {
 	///
 	/// - `assets`: The asset(s) to remove from holding.
 	/// - `max_assets`: The maximum number of unique assets/asset instances to remove from holding.
-	///   Only the first `max_assets` assets/instances of those matched by `assets` will be removed,
-	///   prioritized under standard asset ordering. Any others will remain in holding.
+	///   Only the first `max_assets` assets/instances of those matched by `assets` will be
+	///   removed, prioritized under standard asset ordering. Any others will remain in holding.
 	/// - `dest`: The location whose sovereign account will own the assets and thus the effective
 	///   beneficiary for the assets and the notification target for the reserve asset deposit
 	///   message.
-	/// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction
-	///   which is sent onwards to `dest`.
+	/// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is
+	///   sent onwards to `dest`.
 	///
 	/// Kind: *Instruction*
 	///
@@ -699,9 +699,9 @@ pub enum Instruction<RuntimeCall> {
 	///
 	/// - `assets`: The asset(s) to remove from holding.
 	/// - `reserve`: A valid location that acts as a reserve for all asset(s) in `assets`. The
-	///   sovereign account of this consensus system *on the reserve location* will have appropriate
-	///   assets withdrawn and `effects` will be executed on them. There will typically be only one
-	///   valid location on any given asset/chain combination.
+	///   sovereign account of this consensus system *on the reserve location* will have
+	///   appropriate assets withdrawn and `effects` will be executed on them. There will typically
+	///   be only one valid location on any given asset/chain combination.
 	/// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve
 	///   location*.
 	///
@@ -718,8 +718,8 @@ pub enum Instruction<RuntimeCall> {
 	/// - `xcm`: The instructions to execute on the assets once arrived *on the destination
 	///   location*.
 	///
-	/// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for all
-	/// `assets`. If it does not, then the assets may be lost.
+	/// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for
+	/// all `assets`. If it does not, then the assets may be lost.
 	///
 	/// Kind: *Instruction*
 	///
diff --git a/xcm/src/v2/multiasset.rs b/xcm/src/v2/multiasset.rs
index aae65dcbb54a..fdd7797a1230 100644
--- a/xcm/src/v2/multiasset.rs
+++ b/xcm/src/v2/multiasset.rs
@@ -17,11 +17,14 @@
 //! Cross-Consensus Message format asset data structures.
 //!
 //! This encompasses four types for representing assets:
-//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some amount of a fungible.
-//! - `MultiAssets`: A collection of `MultiAsset`s. These are stored in a `Vec` and sorted with fungibles first.
-//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific kind.
-//! - `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently filtering an XCM holding
-//!   account.
+//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some
+//!   amount of a fungible.
+//! - `MultiAssets`: A collection of `MultiAsset`s. These are stored in a `Vec` and sorted with
+//!   fungibles first.
+//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific
+//!   kind.
+//! - `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently
+//!   filtering an XCM holding account.
 
 use super::MultiLocation;
 use crate::v3::{
@@ -42,8 +45,8 @@ pub enum AssetInstance {
 	/// Undefined - used if the non-fungible asset class has only one instance.
 	Undefined,
 
-	/// A compact index. Technically this could be greater than `u128`, but this implementation supports only
-	/// values up to `2**128 - 1`.
+	/// A compact index. Technically this could be greater than `u128`, but this implementation
+	/// supports only values up to `2**128 - 1`.
 	Index(#[codec(compact)] u128),
 
 	/// A 4-byte fixed-length datum.
@@ -165,19 +168,21 @@ impl AssetId {
 		Ok(())
 	}
 
-	/// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding `MultiAsset` value.
+	/// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding
+	/// `MultiAsset` value.
 	pub fn into_multiasset(self, fun: Fungibility) -> MultiAsset {
 		MultiAsset { fun, id: self }
 	}
 
-	/// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding `WildMultiAsset`
-	/// wildcard (`AllOf`) value.
+	/// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding
+	/// `WildMultiAsset` wildcard (`AllOf`) value.
 	pub fn into_wild(self, fun: WildFungibility) -> WildMultiAsset {
 		WildMultiAsset::AllOf { fun, id: self }
 	}
 }
 
-/// Classification of whether an asset is fungible or not, along with a mandatory amount or instance.
+/// Classification of whether an asset is fungible or not, along with a mandatory amount or
+/// instance.
 #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)]
 #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
 pub enum Fungibility {
@@ -300,7 +305,8 @@ impl TryFrom<NewMultiAsset> for MultiAsset {
 	}
 }
 
-/// A `Vec` of `MultiAsset`s. There may be no duplicate fungible items in here and when decoding, they must be sorted.
+/// A `Vec` of `MultiAsset`s. There may be no duplicate fungible items in here and when decoding,
+/// they must be sorted.
 #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, TypeInfo)]
 #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
 pub struct MultiAssets(Vec<MultiAsset>);
@@ -370,11 +376,12 @@ impl MultiAssets {
 		Self(Vec::new())
 	}
 
-	/// Create a new instance of `MultiAssets` from a `Vec<MultiAsset>` whose contents are sorted and
-	/// which contain no duplicates.
+	/// Create a new instance of `MultiAssets` from a `Vec<MultiAsset>` whose contents are sorted
+	/// and which contain no duplicates.
 	///
-	/// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. If you can't
-	/// guarantee that `r` is sorted and deduplicated, then use `From::<Vec<MultiAsset>>::from` which is infallible.
+	/// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates.
+	/// If you can't guarantee that `r` is sorted and deduplicated, then use
+	/// `From::<Vec<MultiAsset>>::from` which is infallible.
 	pub fn from_sorted_and_deduplicated(r: Vec<MultiAsset>) -> Result<Self, ()> {
 		if r.is_empty() {
 			return Ok(Self(Vec::new()))
@@ -389,20 +396,22 @@ impl MultiAssets {
 		Ok(Self(r))
 	}
 
-	/// Create a new instance of `MultiAssets` from a `Vec<MultiAsset>` whose contents are sorted and
-	/// which contain no duplicates.
+	/// Create a new instance of `MultiAssets` from a `Vec<MultiAsset>` whose contents are sorted
+	/// and which contain no duplicates.
 	///
-	/// In release mode, this skips any checks to ensure that `r` is correct, making it a negligible-cost operation.
-	/// Generally though you should avoid using it unless you have a strict proof that `r` is valid.
+	/// In release mode, this skips any checks to ensure that `r` is correct, making it a
+	/// negligible-cost operation. Generally though you should avoid using it unless you have a
+	/// strict proof that `r` is valid.
 	#[cfg(test)]
 	pub fn from_sorted_and_deduplicated_skip_checks(r: Vec<MultiAsset>) -> Self {
 		Self::from_sorted_and_deduplicated(r).expect("Invalid input r is not sorted/deduped")
 	}
-	/// Create a new instance of `MultiAssets` from a `Vec<MultiAsset>` whose contents are sorted and
-	/// which contain no duplicates.
+	/// Create a new instance of `MultiAssets` from a `Vec<MultiAsset>` whose contents are sorted
+	/// and which contain no duplicates.
 	///
-	/// In release mode, this skips any checks to ensure that `r` is correct, making it a negligible-cost operation.
-	/// Generally though you should avoid using it unless you have a strict proof that `r` is valid.
+	/// In release mode, this skips any checks to ensure that `r` is correct, making it a
+	/// negligible-cost operation. Generally though you should avoid using it unless you have a
+	/// strict proof that `r` is valid.
 	///
 	/// In test mode, this checks anyway and panics on fail.
 	#[cfg(not(test))]
@@ -410,7 +419,8 @@ impl MultiAssets {
 		Self(r)
 	}
 
-	/// Add some asset onto the list, saturating. This is quite a laborious operation since it maintains the ordering.
+	/// Add some asset onto the list, saturating. This is quite a laborious operation since it
+	/// maintains the ordering.
 	pub fn push(&mut self, a: MultiAsset) {
 		if let Fungibility::Fungible(ref amount) = a.fun {
 			for asset in self.0.iter_mut().filter(|x| x.id == a.id) {
@@ -489,19 +499,19 @@ impl TryFrom<NewWildFungibility> for WildFungibility {
 #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)]
 #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
 pub enum WildMultiAsset {
-	/// All assets in the holding register, up to `usize` individual assets (different instances of non-fungibles could
-	/// be separate assets).
+	/// All assets in the holding register, up to `usize` individual assets (different instances of
+	/// non-fungibles could be separate assets).
 	All,
-	/// All assets in the holding register of a given fungibility and ID. If operating on non-fungibles, then a limit
-	/// is provided for the maximum amount of matching instances.
+	/// All assets in the holding register of a given fungibility and ID. If operating on
+	/// non-fungibles, then a limit is provided for the maximum amount of matching instances.
 	AllOf { id: AssetId, fun: WildFungibility },
 }
 
 impl WildMultiAsset {
 	/// Returns true if `self` is a super-set of the given `inner`.
 	///
-	/// Typically, any wildcard is never contained in anything else, and a wildcard can contain any other non-wildcard.
-	/// For more details, see the implementation and tests.
+	/// Typically, any wildcard is never contained in anything else, and a wildcard can contain any
+	/// other non-wildcard. For more details, see the implementation and tests.
 	pub fn contains(&self, inner: &MultiAsset) -> bool {
 		use WildMultiAsset::*;
 		match self {
@@ -565,8 +575,8 @@ impl From<MultiAssets> for MultiAssetFilter {
 impl MultiAssetFilter {
 	/// Returns true if `self` is a super-set of the given `inner`.
 	///
-	/// Typically, any wildcard is never contained in anything else, and a wildcard can contain any other non-wildcard.
-	/// For more details, see the implementation and tests.
+	/// Typically, any wildcard is never contained in anything else, and a wildcard can contain any
+	/// other non-wildcard. For more details, see the implementation and tests.
 	pub fn contains(&self, inner: &MultiAsset) -> bool {
 		match self {
 			MultiAssetFilter::Definite(ref assets) => assets.contains(inner),
diff --git a/xcm/src/v2/multilocation.rs b/xcm/src/v2/multilocation.rs
index 086a83277322..9fb74e8afb35 100644
--- a/xcm/src/v2/multilocation.rs
+++ b/xcm/src/v2/multilocation.rs
@@ -174,8 +174,8 @@ impl MultiLocation {
 		self.interior.push_front(new)
 	}
 
-	/// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with theoriginal value of
-	/// `self` in case of overflow.
+	/// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with
+	/// the original value of `self` in case of overflow.
 	pub fn pushed_with_interior(self, new: Junction) -> result::Result<Self, (Self, Junction)> {
 		match self.interior.pushed_with(new) {
 			Ok(i) => Ok(MultiLocation { interior: i, parents: self.parents }),
@@ -183,8 +183,8 @@ impl MultiLocation {
 		}
 	}
 
-	/// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the original value of
-	/// `self` in case of overflow.
+	/// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the
+	/// original value of `self` in case of overflow.
 	pub fn pushed_front_with_interior(
 		self,
 		new: Junction,
@@ -430,7 +430,8 @@ impl From<Parent> for MultiLocation {
 	}
 }
 
-/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner interior.
+/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner
+/// interior.
 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
 pub struct ParentThen(pub Junctions);
 impl From<ParentThen> for MultiLocation {
@@ -448,7 +449,8 @@ impl From<Ancestor> for MultiLocation {
 	}
 }
 
-/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the inner interior.
+/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the
+/// inner interior.
 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
 pub struct AncestorThen<Interior>(pub u8, pub Interior);
 impl<Interior: Into<Junctions>> From<AncestorThen<Interior>> for MultiLocation {
@@ -598,8 +600,8 @@ impl Junctions {
 		}
 	}
 
-	/// Splits off the first junction, returning the remaining suffix (first item in tuple) and the first element
-	/// (second item in tuple) or `None` if it was empty.
+	/// Splits off the first junction, returning the remaining suffix (first item in tuple) and the
+	/// first element (second item in tuple) or `None` if it was empty.
 	pub fn split_first(self) -> (Junctions, Option<Junction>) {
 		match self {
 			Junctions::Here => (Junctions::Here, None),
@@ -614,8 +616,8 @@ impl Junctions {
 		}
 	}
 
-	/// Splits off the last junction, returning the remaining prefix (first item in tuple) and the last element
-	/// (second item in tuple) or `None` if it was empty.
+	/// Splits off the last junction, returning the remaining prefix (first item in tuple) and the
+	/// last element (second item in tuple) or `None` if it was empty.
 	pub fn split_last(self) -> (Junctions, Option<Junction>) {
 		match self {
 			Junctions::Here => (Junctions::Here, None),
@@ -727,7 +729,8 @@ impl Junctions {
 		}
 	}
 
-	/// Returns the junction at index `i`, or `None` if the location doesn't contain that many elements.
+	/// Returns the junction at index `i`, or `None` if the location doesn't contain that many
+	/// elements.
 	pub fn at(&self, i: usize) -> Option<&Junction> {
 		Some(match (i, self) {
 			(0, Junctions::X1(ref a)) => a,
@@ -770,8 +773,8 @@ impl Junctions {
 		})
 	}
 
-	/// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't contain that many
-	/// elements.
+	/// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't
+	/// contain that many elements.
 	pub fn at_mut(&mut self, i: usize) -> Option<&mut Junction> {
 		Some(match (i, self) {
 			(0, Junctions::X1(ref mut a)) => a,
diff --git a/xcm/src/v2/traits.rs b/xcm/src/v2/traits.rs
index 524b659d57e1..ae03cf5547ba 100644
--- a/xcm/src/v2/traits.rs
+++ b/xcm/src/v2/traits.rs
@@ -81,7 +81,8 @@ pub enum Error {
 	/// Used by `Transact` when the functor cannot be decoded.
 	#[codec(index = 17)]
 	FailedToDecode,
-	/// Used by `Transact` to indicate that the given weight limit could be breached by the functor.
+	/// Used by `Transact` to indicate that the given weight limit could be breached by the
+	/// functor.
 	#[codec(index = 18)]
 	MaxWeightInvalid,
 	/// Used by `BuyExecution` when the Holding Register does not contain payable fees.
@@ -94,7 +95,8 @@ pub enum Error {
 	#[codec(index = 21)]
 	Trap(u64),
 
-	// Errors that happen prior to instructions being executed. These fall outside of the XCM spec.
+	// Errors that happen prior to instructions being executed. These fall outside of the XCM
+	// spec.
 	/// XCM version not able to be handled.
 	UnhandledXcmVersion,
 	/// Execution of the XCM would potentially result in a greater weight used than weight limit.
@@ -161,7 +163,8 @@ pub type Result = result::Result<(), Error>;
 pub enum Outcome {
 	/// Execution completed successfully; given weight was used.
 	Complete(Weight),
-	/// Execution started, but did not complete successfully due to the given error; given weight was used.
+	/// Execution started, but did not complete successfully due to the given error; given weight
+	/// was used.
 	Incomplete(Weight, Error),
 	/// Execution did not start due to the given error.
 	Error(Error),
@@ -194,9 +197,9 @@ impl Outcome {
 
 /// Type of XCM message executor.
 pub trait ExecuteXcm<RuntimeCall> {
-	/// Execute some XCM `message` from `origin` using no more than `weight_limit` weight. The weight limit is
-	/// a basic hard-limit and the implementation may place further restrictions or requirements on weight and
-	/// other aspects.
+	/// Execute some XCM `message` from `origin` using no more than `weight_limit` weight. The
+	/// weight limit is a basic hard-limit and the implementation may place further restrictions or
+	/// requirements on weight and other aspects.
 	fn execute_xcm(
 		origin: impl Into<MultiLocation>,
 		message: Xcm<RuntimeCall>,
@@ -215,8 +218,8 @@ pub trait ExecuteXcm<RuntimeCall> {
 
 	/// Execute some XCM `message` from `origin` using no more than `weight_limit` weight.
 	///
-	/// Some amount of `weight_credit` may be provided which, depending on the implementation, may allow
-	/// execution without associated payment.
+	/// Some amount of `weight_credit` may be provided which, depending on the implementation, may
+	/// allow execution without associated payment.
 	fn execute_xcm_in_credit(
 		origin: impl Into<MultiLocation>,
 		message: Xcm<RuntimeCall>,
@@ -263,9 +266,9 @@ pub type SendResult = result::Result<(), SendError>;
 
 /// Utility for sending an XCM message.
 ///
-/// These can be amalgamated in tuples to form sophisticated routing systems. In tuple format, each router might return
-/// `NotApplicable` to pass the execution to the next sender item. Note that each `NotApplicable`
-/// might alter the destination and the XCM message for to the next router.
+/// These can be amalgamated in tuples to form sophisticated routing systems. In tuple format, each
+/// router might return `NotApplicable` to pass the execution to the next sender item. Note that
+/// each `NotApplicable` might alter the destination and the XCM message for the next router.
 ///
 ///
 /// # Example
@@ -330,9 +333,9 @@ pub type SendResult = result::Result<(), SendError>;
 pub trait SendXcm {
 	/// Send an XCM `message` to a given `destination`.
 	///
-	/// If it is not a destination which can be reached with this type but possibly could by others, then it *MUST*
-	/// return `NotApplicable`. Any other error will cause the tuple implementation to exit early without
-	/// trying other type fields.
+	/// If it is not a destination which can be reached with this type but possibly could by others,
+	/// then it *MUST* return `NotApplicable`. Any other error will cause the tuple implementation
+	/// to exit early without trying other type fields.
 	fn send_xcm(destination: impl Into<MultiLocation>, message: Xcm<()>) -> SendResult;
 }
 
diff --git a/xcm/src/v3/junction.rs b/xcm/src/v3/junction.rs
index 5fee8d1f83bd..ae66e2b33364 100644
--- a/xcm/src/v3/junction.rs
+++ b/xcm/src/v3/junction.rs
@@ -127,20 +127,20 @@ pub enum BodyId {
 	Executive,
 	/// The unambiguous technical body (for Polkadot, this would be the Technical Committee).
 	Technical,
-	/// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a majority of
-	/// lock-voters).
+	/// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a
+	/// majority of lock-voters).
 	Legislative,
-	/// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a "grand oracle", it
-	/// may be considered as that).
+	/// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a
+	/// "grand oracle", it may be considered as that).
 	Judicial,
-	/// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public referendum
-	/// on the `staking_admin` track).
+	/// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public
+	/// referendum on the `staking_admin` track).
 	Defense,
-	/// The unambiguous administration body (for Polkadot, an opinion on the topic given via a public referendum
-	/// on the `general_admin` track).
+	/// The unambiguous administration body (for Polkadot, an opinion on the topic given via a
+	/// public referendum on the `general_admin` track).
 	Administration,
-	/// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public referendum
-	/// on the `treasurer` track).
+	/// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public
+	/// referendum on the `treasurer` track).
 	Treasury,
 }
 
@@ -266,13 +266,13 @@ pub enum Junction {
 	///
 	/// Generally used when the context is a Polkadot Relay-chain.
 	Parachain(#[codec(compact)] u32),
-	/// A 32-byte identifier for an account of a specific network that is respected as a sovereign endpoint within
-	/// the context.
+	/// A 32-byte identifier for an account of a specific network that is respected as a sovereign
+	/// endpoint within the context.
 	///
 	/// Generally used when the context is a Substrate-based chain.
 	AccountId32 { network: Option<NetworkId>, id: [u8; 32] },
-	/// An 8-byte index for an account of a specific network that is respected as a sovereign endpoint within
-	/// the context.
+	/// An 8-byte index for an account of a specific network that is respected as a sovereign
+	/// endpoint within the context.
 	///
 	/// May be used when the context is a Frame-based chain and includes e.g. an indices pallet.
 	AccountIndex64 {
@@ -280,8 +280,8 @@ pub enum Junction {
 		#[codec(compact)]
 		index: u64,
 	},
-	/// A 20-byte identifier for an account of a specific network that is respected as a sovereign endpoint within
-	/// the context.
+	/// A 20-byte identifier for an account of a specific network that is respected as a sovereign
+	/// endpoint within the context.
 	///
 	/// May be used when the context is an Ethereum or Bitcoin chain or smart-contract.
 	AccountKey20 { network: Option<NetworkId>, key: [u8; 20] },
@@ -310,8 +310,8 @@ pub enum Junction {
 	OnlyChild,
 	/// A pluralistic body existing within consensus.
 	///
-	/// Typical to be used to represent a governance origin of a chain, but could in principle be used to represent
-	/// things such as multisigs also.
+	/// Typically used to represent a governance origin of a chain, but could in principle be
+	/// used to represent things such as multisigs also.
 	Plurality { id: BodyId, part: BodyPart },
 	/// A global network capable of externalizing its own consensus. This is not generally
 	/// meaningful outside of the universal level.
@@ -413,7 +413,8 @@ impl Junction {
 
 	/// Convert `self` into a `MultiLocation` containing `n` parents.
 	///
-	/// Similar to `Self::into_location`, with the added ability to specify the number of parent junctions.
+	/// Similar to `Self::into_location`, with the added ability to specify the number of parent
+	/// junctions.
 	pub const fn into_exterior(self, n: u8) -> MultiLocation {
 		MultiLocation { parents: n, interior: Junctions::X1(self) }
 	}
diff --git a/xcm/src/v3/junctions.rs b/xcm/src/v3/junctions.rs
index da06cdbdad67..201a80fb7658 100644
--- a/xcm/src/v3/junctions.rs
+++ b/xcm/src/v3/junctions.rs
@@ -137,7 +137,8 @@ impl Junctions {
 
 	/// Convert `self` into a `MultiLocation` containing `n` parents.
 	///
-	/// Similar to `Self::into_location`, with the added ability to specify the number of parent junctions.
+	/// Similar to `Self::into_location`, with the added ability to specify the number of parent
+	/// junctions.
 	pub const fn into_exterior(self, n: u8) -> MultiLocation {
 		MultiLocation { parents: n, interior: self }
 	}
@@ -309,8 +310,8 @@ impl Junctions {
 		}
 	}
 
-	/// Splits off the first junction, returning the remaining suffix (first item in tuple) and the first element
-	/// (second item in tuple) or `None` if it was empty.
+	/// Splits off the first junction, returning the remaining suffix (first item in tuple) and the
+	/// first element (second item in tuple) or `None` if it was empty.
 	pub fn split_first(self) -> (Junctions, Option<Junction>) {
 		match self {
 			Junctions::Here => (Junctions::Here, None),
@@ -325,8 +326,8 @@ impl Junctions {
 		}
 	}
 
-	/// Splits off the last junction, returning the remaining prefix (first item in tuple) and the last element
-	/// (second item in tuple) or `None` if it was empty.
+	/// Splits off the last junction, returning the remaining prefix (first item in tuple) and the
+	/// last element (second item in tuple) or `None` if it was empty.
 	pub fn split_last(self) -> (Junctions, Option<Junction>) {
 		match self {
 			Junctions::Here => (Junctions::Here, None),
@@ -469,7 +470,8 @@ impl Junctions {
 		}
 	}
 
-	/// Returns the junction at index `i`, or `None` if the location doesn't contain that many elements.
+	/// Returns the junction at index `i`, or `None` if the location doesn't contain that many
+	/// elements.
 	pub fn at(&self, i: usize) -> Option<&Junction> {
 		Some(match (i, self) {
 			(0, Junctions::X1(ref a)) => a,
@@ -512,8 +514,8 @@ impl Junctions {
 		})
 	}
 
-	/// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't contain that many
-	/// elements.
+	/// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't
+	/// contain that many elements.
 	pub fn at_mut(&mut self, i: usize) -> Option<&mut Junction> {
 		Some(match (i, self) {
 			(0, Junctions::X1(ref mut a)) => a,
diff --git a/xcm/src/v3/mod.rs b/xcm/src/v3/mod.rs
index 772ad48ac4b2..3614dc22550d 100644
--- a/xcm/src/v3/mod.rs
+++ b/xcm/src/v3/mod.rs
@@ -367,8 +367,8 @@ impl XcmContext {
 ///
 /// All messages are delivered from a known *origin*, expressed as a `MultiLocation`.
 ///
-/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the outer
-/// XCM format, known as `VersionedXcm`.
+/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the
+/// outer XCM format, known as `VersionedXcm`.
 #[derive(Derivative, Encode, Decode, TypeInfo, xcm_procedural::XcmWeightInfoTrait)]
 #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))]
 #[codec(encode_bound())]
@@ -417,9 +417,8 @@ pub enum Instruction<Call> {
 	/// - `response`: The message content.
 	/// - `max_weight`: The maximum weight that handling this response should take.
 	/// - `querier`: The location responsible for the initiation of the response, if there is one.
-	///   In general this will tend to be the same location as the receiver of this message.
-	///   NOTE: As usual, this is interpreted from the perspective of the receiving consensus
-	///   system.
+	///   In general this will tend to be the same location as the receiver of this message. NOTE:
+	///   As usual, this is interpreted from the perspective of the receiving consensus system.
 	///
 	/// Safety: Since this is information only, there are no immediate concerns. However, it should
 	/// be remembered that even if the Origin behaves reasonably, it can always be asked to make
@@ -460,8 +459,8 @@ pub enum Instruction<Call> {
 	/// - `dest`: The location whose sovereign account will own the assets and thus the effective
 	///   beneficiary for the assets and the notification target for the reserve asset deposit
 	///   message.
-	/// - `xcm`: The instructions that should follow the `ReserveAssetDeposited`
-	///   instruction, which is sent onwards to `dest`.
+	/// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` instruction, which
+	///   is sent onwards to `dest`.
 	///
 	/// Safety: No concerns.
 	///
@@ -487,10 +486,11 @@ pub enum Instruction<Call> {
 	/// Errors:
 	Transact { origin_kind: OriginKind, require_weight_at_most: Weight, call: DoubleEncoded<Call> },
 
-	/// A message to notify about a new incoming HRMP channel. This message is meant to be sent by the
-	/// relay-chain to a para.
+	/// A message to notify about a new incoming HRMP channel. This message is meant to be sent by
+	/// the relay-chain to a para.
 	///
-	/// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel opening.
+	/// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel
+	///   opening.
 	/// - `max_message_size`: The maximum size of a message proposed by the sender.
 	/// - `max_capacity`: The maximum number of messages that can be queued in the channel.
 	///
@@ -507,8 +507,8 @@ pub enum Instruction<Call> {
 	},
 
 	/// A message to notify about that a previously sent open channel request has been accepted by
-	/// the recipient. That means that the channel will be opened during the next relay-chain session
-	/// change. This message is meant to be sent by the relay-chain to a para.
+	/// the recipient. That means that the channel will be opened during the next relay-chain
+	/// session change. This message is meant to be sent by the relay-chain to a para.
 	///
 	/// Safety: The message should originate directly from the relay-chain.
 	///
@@ -522,10 +522,10 @@ pub enum Instruction<Call> {
 		recipient: u32,
 	},
 
-	/// A message to notify that the other party in an open channel decided to close it. In particular,
-	/// `initiator` is going to close the channel opened from `sender` to the `recipient`. The close
-	/// will be enacted at the next relay-chain session change. This message is meant to be sent by
-	/// the relay-chain to a para.
+	/// A message to notify that the other party in an open channel decided to close it. In
+	/// particular, `initiator` is going to close the channel opened from `sender` to the
+	/// `recipient`. The close will be enacted at the next relay-chain session change. This message
+	/// is meant to be sent by the relay-chain to a para.
 	///
 	/// Safety: The message should originate directly from the relay-chain.
 	///
@@ -593,8 +593,8 @@ pub enum Instruction<Call> {
 	/// - `dest`: The location whose sovereign account will own the assets and thus the effective
 	///   beneficiary for the assets and the notification target for the reserve asset deposit
 	///   message.
-	/// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction
-	///   which is sent onwards to `dest`.
+	/// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is
+	///   sent onwards to `dest`.
 	///
 	/// Kind: *Instruction*
 	///
@@ -623,9 +623,9 @@ pub enum Instruction<Call> {
 	///
 	/// - `assets`: The asset(s) to remove from holding.
 	/// - `reserve`: A valid location that acts as a reserve for all asset(s) in `assets`. The
-	///   sovereign account of this consensus system *on the reserve location* will have appropriate
-	///   assets withdrawn and `effects` will be executed on them. There will typically be only one
-	///   valid location on any given asset/chain combination.
+	///   sovereign account of this consensus system *on the reserve location* will have
+	///   appropriate assets withdrawn and `effects` will be executed on them. There will typically
+	///   be only one valid location on any given asset/chain combination.
 	/// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve
 	///   location*.
 	///
@@ -642,8 +642,8 @@ pub enum Instruction<Call> {
 	/// - `xcm`: The instructions to execute on the assets once arrived *on the destination
 	///   location*.
 	///
-	/// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for all
-	/// `assets`. If it does not, then the assets may be lost.
+	/// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for
+	/// all `assets`. If it does not, then the assets may be lost.
 	///
 	/// Kind: *Instruction*
 	///
@@ -809,7 +809,8 @@ pub enum Instruction<Call> {
 	/// Kind: *Instruction*
 	///
 	/// Errors:
-	/// - `ExpectationFalse`: If the value of the Transact Status Register is not equal to the parameter.
+	/// - `ExpectationFalse`: If the value of the Transact Status Register is not equal to the
+	///   parameter.
 	ExpectTransactStatus(MaybeErrorCode),
 
 	/// Query the existence of a particular pallet type.
@@ -830,11 +831,15 @@ pub enum Instruction<Call> {
 
 	/// Ensure that a particular pallet with a particular version exists.
 	///
-	/// - `index: Compact`: The index which identifies the pallet. An error if no pallet exists at this index.
+	/// - `index: Compact`: The index which identifies the pallet. An error if no pallet exists at
+	///   this index.
 	/// - `name: Vec<u8>`: Name which must be equal to the name of the pallet.
-	/// - `module_name: Vec<u8>`: Module name which must be equal to the name of the module in which the pallet exists.
-	/// - `crate_major: Compact`: Version number which must be equal to the major version of the crate which implements the pallet.
-	/// - `min_crate_minor: Compact`: Version number which must be at most the minor version of the crate which implements the pallet.
+	/// - `module_name: Vec<u8>`: Module name which must be equal to the name of the module in
+	///   which the pallet exists.
+	/// - `crate_major: Compact`: Version number which must be equal to the major version of the
+	///   crate which implements the pallet.
+	/// - `min_crate_minor: Compact`: Version number which must be at most the minor version of the
+	///   crate which implements the pallet.
 	///
 	/// Safety: No concerns.
 	///
@@ -961,8 +966,8 @@ pub enum Instruction<Call> {
 	/// of course, if there is no record that the asset actually is locked.
 	///
 	/// - `asset`: The asset(s) to be unlocked.
-	/// - `locker`: The location from which a previous `NoteUnlockable` was sent and to which
-	///   an `UnlockAsset` should be sent.
+	/// - `locker`: The location from which a previous `NoteUnlockable` was sent and to which an
+	///   `UnlockAsset` should be sent.
 	///
 	/// Kind: *Instruction*.
 	///
@@ -971,8 +976,8 @@ pub enum Instruction<Call> {
 
 	/// Sets the Fees Mode Register.
 	///
-	/// - `jit_withdraw`: The fees mode item; if set to `true` then fees for any instructions
-	///   are withdrawn as needed using the same mechanism as `WithdrawAssets`.
+	/// - `jit_withdraw`: The fees mode item; if set to `true` then fees for any instructions are
+	///   withdrawn as needed using the same mechanism as `WithdrawAssets`.
 	///
 	/// Kind: *Instruction*.
 	///
diff --git a/xcm/src/v3/multiasset.rs b/xcm/src/v3/multiasset.rs
index a4900a71539a..1668d1b870dc 100644
--- a/xcm/src/v3/multiasset.rs
+++ b/xcm/src/v3/multiasset.rs
@@ -17,11 +17,14 @@
 //! Cross-Consensus Message format asset data structures.
 //!
 //! This encompasses four types for representing assets:
-//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some amount of a fungible.
-//! - `MultiAssets`: A collection of `MultiAsset`s. These are stored in a `Vec` and sorted with fungibles first.
-//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific kind.
-//! - `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently filtering an XCM holding
-//!   account.
+//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some
+//!   amount of a fungible.
+//! - `MultiAssets`: A collection of `MultiAsset`s. These are stored in a `Vec` and sorted with
+//!   fungibles first.
+//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific
+//!   kind.
+//! - `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently
+//!   filtering an XCM holding account.
 
 use super::{InteriorMultiLocation, MultiLocation};
 use crate::v2::{
@@ -47,8 +50,8 @@ pub enum AssetInstance {
 	/// Undefined - used if the non-fungible asset class has only one instance.
 	Undefined,
 
-	/// A compact index. Technically this could be greater than `u128`, but this implementation supports only
-	/// values up to `2**128 - 1`.
+	/// A compact index. Technically this could be greater than `u128`, but this implementation
+	/// supports only values up to `2**128 - 1`.
 	Index(#[codec(compact)] u128),
 
 	/// A 4-byte fixed-length datum.
@@ -234,7 +237,8 @@ impl TryFrom<AssetInstance> for u128 {
 	}
 }
 
-/// Classification of whether an asset is fungible or not, along with a mandatory amount or instance.
+/// Classification of whether an asset is fungible or not, along with a mandatory amount or
+/// instance.
 #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, TypeInfo, MaxEncodedLen)]
 #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
 pub enum Fungibility {
@@ -387,13 +391,14 @@ impl AssetId {
 		Ok(())
 	}
 
-	/// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding `MultiAsset` value.
+	/// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding
+	/// `MultiAsset` value.
 	pub fn into_multiasset(self, fun: Fungibility) -> MultiAsset {
 		MultiAsset { fun, id: self }
 	}
 
-	/// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding `WildMultiAsset`
-	/// wildcard (`AllOf`) value.
+	/// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding
+	/// `WildMultiAsset` wildcard (`AllOf`) value.
 	pub fn into_wild(self, fun: WildFungibility) -> WildMultiAsset {
 		WildMultiAsset::AllOf { fun, id: self }
 	}
@@ -576,11 +581,12 @@ impl MultiAssets {
 		Self(Vec::new())
 	}
 
-	/// Create a new instance of `MultiAssets` from a `Vec<MultiAsset>` whose contents are sorted and
-	/// which contain no duplicates.
+	/// Create a new instance of `MultiAssets` from a `Vec<MultiAsset>` whose contents are sorted
+	/// and which contain no duplicates.
 	///
-	/// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. If you can't
-	/// guarantee that `r` is sorted and deduplicated, then use `From::<Vec<MultiAsset>>::from` which is infallible.
+	/// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates.
+	/// If you can't guarantee that `r` is sorted and deduplicated, then use
+	/// `From::<Vec<MultiAsset>>::from` which is infallible.
 	pub fn from_sorted_and_deduplicated(r: Vec<MultiAsset>) -> Result<Self, ()> {
 		if r.is_empty() {
 			return Ok(Self(Vec::new()))
@@ -595,20 +601,22 @@ impl MultiAssets {
 		Ok(Self(r))
 	}
 
-	/// Create a new instance of `MultiAssets` from a `Vec<MultiAsset>` whose contents are sorted and
-	/// which contain no duplicates.
+	/// Create a new instance of `MultiAssets` from a `Vec<MultiAsset>` whose contents are sorted
+	/// and which contain no duplicates.
 	///
-	/// In release mode, this skips any checks to ensure that `r` is correct, making it a negligible-cost operation.
-	/// Generally though you should avoid using it unless you have a strict proof that `r` is valid.
+	/// In release mode, this skips any checks to ensure that `r` is correct, making it a
+	/// negligible-cost operation. Generally though you should avoid using it unless you have a
+	/// strict proof that `r` is valid.
 	#[cfg(test)]
 	pub fn from_sorted_and_deduplicated_skip_checks(r: Vec<MultiAsset>) -> Self {
 		Self::from_sorted_and_deduplicated(r).expect("Invalid input r is not sorted/deduped")
 	}
-	/// Create a new instance of `MultiAssets` from a `Vec<MultiAsset>` whose contents are sorted and
-	/// which contain no duplicates.
+	/// Create a new instance of `MultiAssets` from a `Vec<MultiAsset>` whose contents are sorted
+	/// and which contain no duplicates.
 	///
-	/// In release mode, this skips any checks to ensure that `r` is correct, making it a negligible-cost operation.
-	/// Generally though you should avoid using it unless you have a strict proof that `r` is valid.
+	/// In release mode, this skips any checks to ensure that `r` is correct, making it a
+	/// negligible-cost operation. Generally though you should avoid using it unless you have a
+	/// strict proof that `r` is valid.
 	///
 	/// In test mode, this checks anyway and panics on fail.
 	#[cfg(not(test))]
@@ -616,7 +624,8 @@ impl MultiAssets {
 		Self(r)
 	}
 
-	/// Add some asset onto the list, saturating. This is quite a laborious operation since it maintains the ordering.
+	/// Add some asset onto the list, saturating. This is quite a laborious operation since it
+	/// maintains the ordering.
 	pub fn push(&mut self, a: MultiAsset) {
 		for asset in self.0.iter_mut().filter(|x| x.id == a.id) {
 			match (&a.fun, &mut asset.fun) {
diff --git a/xcm/src/v3/multilocation.rs b/xcm/src/v3/multilocation.rs
index 09d547503f1c..07f829d014c0 100644
--- a/xcm/src/v3/multilocation.rs
+++ b/xcm/src/v3/multilocation.rs
@@ -198,8 +198,8 @@ impl MultiLocation {
 		self.interior.push_front(new)
 	}
 
-	/// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with theoriginal value of
-	/// `self` in case of overflow.
+	/// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with
+/// the original value of `self` in case of overflow.
 	pub fn pushed_with_interior(
 		self,
 		new: impl Into<Junction>,
@@ -210,8 +210,8 @@ impl MultiLocation {
 		}
 	}
 
-	/// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the original value of
-	/// `self` in case of overflow.
+	/// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the
+	/// original value of `self` in case of overflow.
 	pub fn pushed_front_with_interior(
 		self,
 		new: impl Into<Junction>,
@@ -472,7 +472,8 @@ impl From<Parent> for MultiLocation {
 	}
 }
 
-/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner interior.
+/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner
+/// interior.
 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
 pub struct ParentThen(pub Junctions);
 impl From<ParentThen> for MultiLocation {
@@ -490,7 +491,8 @@ impl From<Ancestor> for MultiLocation {
 	}
 }
 
-/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the inner interior.
+/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the
+/// inner interior.
 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
 pub struct AncestorThen<Interior>(pub u8, pub Interior);
 impl<Interior: Into<Junctions>> From<AncestorThen<Interior>> for MultiLocation {
diff --git a/xcm/src/v3/traits.rs b/xcm/src/v3/traits.rs
index 966fb724ed11..128be42c2a2b 100644
--- a/xcm/src/v3/traits.rs
+++ b/xcm/src/v3/traits.rs
@@ -86,7 +86,8 @@ pub enum Error {
 	/// Used by `Transact` when the functor cannot be decoded.
 	#[codec(index = 17)]
 	FailedToDecode,
-	/// Used by `Transact` to indicate that the given weight limit could be breached by the functor.
+	/// Used by `Transact` to indicate that the given weight limit could be breached by the
+	/// functor.
 	#[codec(index = 18)]
 	MaxWeightInvalid,
 	/// Used by `BuyExecution` when the Holding Register does not contain payable fees.
@@ -138,7 +139,8 @@ pub enum Error {
 	#[codec(index = 34)]
 	NotDepositable,
 
-	// Errors that happen prior to instructions being executed. These fall outside of the XCM spec.
+	// Errors that happen prior to instructions being executed. These fall outside of the XCM
+	// spec.
 	/// XCM version not able to be handled.
 	UnhandledXcmVersion,
 	/// Execution of the XCM would potentially result in a greater weight used than weight limit.
@@ -263,7 +265,8 @@ impl From<Error> for Outcome {
 pub enum Outcome {
 	/// Execution completed successfully; given weight was used.
 	Complete(Weight),
-	/// Execution started, but did not complete successfully due to the given error; given weight was used.
+	/// Execution started, but did not complete successfully due to the given error; given weight
+	/// was used.
 	Incomplete(Weight, Error),
 	/// Execution did not start due to the given error.
 	Error(Error),
diff --git a/xcm/xcm-builder/src/asset_conversion.rs b/xcm/xcm-builder/src/asset_conversion.rs
index 583231d792dd..2fe26e8cd1e3 100644
--- a/xcm/xcm-builder/src/asset_conversion.rs
+++ b/xcm/xcm-builder/src/asset_conversion.rs
@@ -22,9 +22,9 @@ use sp_std::{marker::PhantomData, prelude::*, result};
 use xcm::latest::prelude::*;
 use xcm_executor::traits::{Error as MatchError, MatchesFungibles, MatchesNonFungibles};
 
-/// Converter struct implementing `AssetIdConversion` converting a numeric asset ID (must be `TryFrom/TryInto<u128>`) into
-/// a `GeneralIndex` junction, prefixed by some `MultiLocation` value. The `MultiLocation` value will typically be a
-/// `PalletInstance` junction.
+/// Converter struct implementing `AssetIdConversion` converting a numeric asset ID (must be
+/// `TryFrom/TryInto<u128>`) into a `GeneralIndex` junction, prefixed by some `MultiLocation` value.
+/// The `MultiLocation` value will typically be a `PalletInstance` junction.
 pub struct AsPrefixedGeneralIndex<Prefix, AssetId, ConvertAssetId>(
 	PhantomData<(Prefix, AssetId, ConvertAssetId)>,
 );
diff --git a/xcm/xcm-builder/src/currency_adapter.rs b/xcm/xcm-builder/src/currency_adapter.rs
index 32db840858a9..4dbd4fe8bcd0 100644
--- a/xcm/xcm-builder/src/currency_adapter.rs
+++ b/xcm/xcm-builder/src/currency_adapter.rs
@@ -44,8 +44,8 @@ impl From<Error> for XcmError {
 	}
 }
 
-/// Simple adapter to use a currency as asset transactor. This type can be used as `type AssetTransactor` in
-/// `xcm::Config`.
+/// Simple adapter to use a currency as asset transactor. This type can be used as `type
+/// AssetTransactor` in `xcm::Config`.
 ///
 /// # Example
 /// ```
diff --git a/xcm/xcm-builder/src/fungibles_adapter.rs b/xcm/xcm-builder/src/fungibles_adapter.rs
index bcb0e9c870b3..d7fded01e2db 100644
--- a/xcm/xcm-builder/src/fungibles_adapter.rs
+++ b/xcm/xcm-builder/src/fungibles_adapter.rs
@@ -63,8 +63,8 @@ impl<
 /// The location which is allowed to mint a particular asset.
 #[derive(Copy, Clone, Eq, PartialEq)]
 pub enum MintLocation {
-	/// This chain is allowed to mint the asset. When we track teleports of the asset we ensure that
-	/// no more of the asset returns back to the chain than has been sent out.
+	/// This chain is allowed to mint the asset. When we track teleports of the asset we ensure
+	/// that no more of the asset returns back to the chain than has been sent out.
 	Local,
 	/// This chain is not allowed to mint the asset. When we track teleports of the asset we ensure
 	/// that no more of the asset is sent out from the chain than has been previously received.
diff --git a/xcm/xcm-builder/src/location_conversion.rs b/xcm/xcm-builder/src/location_conversion.rs
index ccc3cc040e61..26b48fc88adc 100644
--- a/xcm/xcm-builder/src/location_conversion.rs
+++ b/xcm/xcm-builder/src/location_conversion.rs
@@ -345,10 +345,11 @@ impl<Network: Get<Option<NetworkId>>, AccountId: From<[u8; 20]> + Into<[u8; 20]>
 	}
 }
 
-/// Converts a location which is a top-level relay chain (which provides its own consensus) into a 32-byte `AccountId`.
+/// Converts a location which is a top-level relay chain (which provides its own consensus) into a
+/// 32-byte `AccountId`.
 ///
-/// This will always result in the *same account ID* being returned for the same Relay-chain, regardless of the relative security of
-/// this Relay-chain compared to the local chain.
+/// This will always result in the *same account ID* being returned for the same Relay-chain,
+/// regardless of the relative security of this Relay-chain compared to the local chain.
 ///
 /// Note: No distinction is made between the cases when the given `UniversalLocation` lies within
 /// the same consensus system (i.e. is itself or a parent) and when it is a foreign consensus
diff --git a/xcm/xcm-builder/src/origin_aliases.rs b/xcm/xcm-builder/src/origin_aliases.rs
index 12bcdad3dfea..82c5f71b7a12 100644
--- a/xcm/xcm-builder/src/origin_aliases.rs
+++ b/xcm/xcm-builder/src/origin_aliases.rs
@@ -20,7 +20,8 @@ use frame_support::traits::{Contains, ContainsPair};
 use sp_std::marker::PhantomData;
 use xcm::latest::prelude::*;
 
-/// Alias a Foreign `AccountId32` with a local `AccountId32` if the foreign `AccountId32` matches the `Prefix` pattern.
+/// Alias a Foreign `AccountId32` with a local `AccountId32` if the foreign `AccountId32` matches
+/// the `Prefix` pattern.
 ///
 /// Requires that the prefixed origin `AccountId32` matches the target `AccountId32`.
 pub struct AliasForeignAccountId32<Prefix>(PhantomData<Prefix>);
diff --git a/xcm/xcm-builder/src/origin_conversion.rs b/xcm/xcm-builder/src/origin_conversion.rs
index 0810b1ce2f8b..112b26869a99 100644
--- a/xcm/xcm-builder/src/origin_conversion.rs
+++ b/xcm/xcm-builder/src/origin_conversion.rs
@@ -24,7 +24,8 @@ use sp_std::marker::PhantomData;
 use xcm::latest::{BodyId, BodyPart, Junction, Junctions::*, MultiLocation, NetworkId, OriginKind};
 use xcm_executor::traits::{ConvertLocation, ConvertOrigin};
 
-/// Sovereign accounts use the system's `Signed` origin with an account ID derived from the `LocationConverter`.
+/// Sovereign accounts use the system's `Signed` origin with an account ID derived from the
+/// `LocationConverter`.
 pub struct SovereignSignedViaLocation<LocationConverter, RuntimeOrigin>(
 	PhantomData<(LocationConverter, RuntimeOrigin)>,
 );
@@ -269,10 +270,11 @@ where
 	}
 }
 
-/// `Convert` implementation to convert from some a `Signed` (system) `Origin` into an `AccountId32`.
+/// `Convert` implementation to convert from a `Signed` (system) `Origin` into an
+/// `AccountId32`.
 ///
-/// Typically used when configuring `pallet-xcm` for allowing normal accounts to dispatch an XCM from an `AccountId32`
-/// origin.
+/// Typically used when configuring `pallet-xcm` for allowing normal accounts to dispatch an XCM
+/// from an `AccountId32` origin.
 pub struct SignedToAccountId32<RuntimeOrigin, AccountId, Network>(
 	PhantomData<(RuntimeOrigin, AccountId, Network)>,
 );
@@ -296,11 +298,11 @@ where
 	}
 }
 
-/// `Convert` implementation to convert from some an origin which implements `Backing` into a corresponding `Plurality`
-/// `MultiLocation`.
+/// `Convert` implementation to convert from an origin which implements `Backing` into a
+/// corresponding `Plurality` `MultiLocation`.
 ///
-/// Typically used when configuring `pallet-xcm` for allowing a collective's Origin to dispatch an XCM from a
-/// `Plurality` origin.
+/// Typically used when configuring `pallet-xcm` for allowing a collective's Origin to dispatch an
+/// XCM from a `Plurality` origin.
 pub struct BackingToPlurality<RuntimeOrigin, COrigin, Body>(
 	PhantomData<(RuntimeOrigin, COrigin, Body)>,
 );
diff --git a/xcm/xcm-builder/src/tests/assets.rs b/xcm/xcm-builder/src/tests/assets.rs
index 9b8ba0e459de..dbcb731a1bda 100644
--- a/xcm/xcm-builder/src/tests/assets.rs
+++ b/xcm/xcm-builder/src/tests/assets.rs
@@ -396,7 +396,8 @@ fn max_assets_limit_should_work() {
 	);
 	assert_eq!(r, Outcome::Incomplete(Weight::from_parts(95, 95), XcmError::HoldingWouldOverflow));
 
-	// Attempt to withdraw 4 different assets and then the same 4 and then a different 4 will succeed.
+	// Attempt to withdraw 4 different assets and then the same 4 and then a different 4 will
+	// succeed.
 	let message = Xcm(vec![
 		WithdrawAsset(([1u8; 32], 100u128).into()),
 		WithdrawAsset(([2u8; 32], 100u128).into()),
diff --git a/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs b/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs
index 2f9bfcc2d80a..6870413c38d5 100644
--- a/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs
+++ b/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs
@@ -80,7 +80,8 @@ fn sending_to_bridged_chain_works() {
 		)];
 		assert_eq!(take_received_remote_messages(), expected);
 
-		// The export cost 50 ref time and 50 proof size weight units (and thus 100 units of balance).
+		// The export cost 50 ref time and 50 proof size weight units (and thus 100 units of
+		// balance).
 		assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]);
 
 		let entry = LogEntry {
@@ -154,7 +155,8 @@ fn sending_to_parachain_of_bridged_chain_works() {
 		)];
 		assert_eq!(take_received_remote_messages(), expected);
 
-		// The export cost 50 ref time and 50 proof size weight units (and thus 100 units of balance).
+		// The export cost 50 ref time and 50 proof size weight units (and thus 100 units of
+		// balance).
 		assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]);
 
 		let entry = LogEntry {
diff --git a/xcm/xcm-builder/src/tests/mock.rs b/xcm/xcm-builder/src/tests/mock.rs
index 66a676369a67..aea780b84367 100644
--- a/xcm/xcm-builder/src/tests/mock.rs
+++ b/xcm/xcm-builder/src/tests/mock.rs
@@ -60,8 +60,8 @@ pub enum TestOrigin {
 
 /// A dummy call.
 ///
-/// Each item contains the amount of weight that it *wants* to consume as the first item, and the actual amount (if
-/// different from the former) in the second option.
+/// Each item contains the amount of weight that it *wants* to consume as the first item, and the
+/// actual amount (if different from the former) in the second option.
 #[derive(Debug, Encode, Decode, Eq, PartialEq, Clone, Copy, scale_info::TypeInfo)]
 pub enum TestCall {
 	OnlyRoot(Weight, Option<Weight>),
diff --git a/xcm/xcm-builder/src/tests/querying.rs b/xcm/xcm-builder/src/tests/querying.rs
index be8edfe87b8d..8fbb55eb2542 100644
--- a/xcm/xcm-builder/src/tests/querying.rs
+++ b/xcm/xcm-builder/src/tests/querying.rs
@@ -95,7 +95,8 @@ fn pallet_query_with_results_should_work() {
 #[test]
 fn prepaid_result_of_query_should_get_free_execution() {
 	let query_id = 33;
-	// We put this in manually here, but normally this would be done at the point of crafting the message.
+	// We put this in manually here, but normally this would be done at the point of crafting the
+	// message.
 	expect_response(query_id, Parent.into());
 
 	let the_response = Response::Assets((Parent, 100u128).into());
diff --git a/xcm/xcm-builder/src/universal_exports.rs b/xcm/xcm-builder/src/universal_exports.rs
index 9a65ec7dfe40..0ee627e0ee90 100644
--- a/xcm/xcm-builder/src/universal_exports.rs
+++ b/xcm/xcm-builder/src/universal_exports.rs
@@ -300,7 +300,8 @@ pub trait HaulBlob {
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum HaulBlobError {
-	/// Represents point-to-point link failure with a human-readable explanation of the specific issue is provided.
+	/// Represents a point-to-point link failure, with a human-readable explanation of the
+	/// specific issue provided.
 	Transport(&'static str),
 }
 
@@ -361,8 +362,9 @@ impl<
 			message.try_into().map_err(|_| DispatchBlobError::UnsupportedXcmVersion)?;
 
 		// Prepend our bridge instance discriminator.
-		// Can be used for fine-grained control of origin on destination in case of multiple bridge instances,
-		// e.g. restrict `type UniversalAliases` and `UniversalOrigin` instruction to trust just particular bridge instance for `NetworkId`.
+		// Can be used for fine-grained control of origin on destination in case of multiple bridge
+		// instances, e.g. restrict `type UniversalAliases` and `UniversalOrigin` instruction to
+		// trust just particular bridge instance for `NetworkId`.
 		if let Some(bridge_instance) = OurPlaceBridgeInstance::get() {
 			message.0.insert(0, DescendOrigin(bridge_instance));
 		}
diff --git a/xcm/xcm-builder/src/weight.rs b/xcm/xcm-builder/src/weight.rs
index 73cba6cb557b..f1c14a4c6517 100644
--- a/xcm/xcm-builder/src/weight.rs
+++ b/xcm/xcm-builder/src/weight.rs
@@ -114,8 +114,9 @@ where
 	}
 }
 
-/// Function trait for handling some revenue. Similar to a negative imbalance (credit) handler, but for a
-/// `MultiAsset`. Sensible implementations will deposit the asset in some known treasury or block-author account.
+/// Function trait for handling some revenue. Similar to a negative imbalance (credit) handler, but
+/// for a `MultiAsset`. Sensible implementations will deposit the asset in some known treasury or
+/// block-author account.
 pub trait TakeRevenue {
 	/// Do something with the given `revenue`, which is a single non-wildcard `MultiAsset`.
 	fn take_revenue(revenue: MultiAsset);
diff --git a/xcm/xcm-builder/tests/scenarios.rs b/xcm/xcm-builder/tests/scenarios.rs
index e587c4118e74..3e735720aa76 100644
--- a/xcm/xcm-builder/tests/scenarios.rs
+++ b/xcm/xcm-builder/tests/scenarios.rs
@@ -101,8 +101,8 @@ fn transfer_asset_works() {
 /// A parachain wants to be notified that a transfer worked correctly.
 /// It includes a `QueryHolding` order after the deposit to get notified on success.
 /// This somewhat abuses `QueryHolding` as an indication of execution success. It works because
-/// order execution halts on error (so no `QueryResponse` will be sent if the previous order failed).
-/// The inner response sent due to the query is not used.
+/// order execution halts on error (so no `QueryResponse` will be sent if the previous order
+/// failed). The inner response sent due to the query is not used.
 ///
 /// Asserts that the balances are updated correctly and the expected XCM is sent.
 #[test]
diff --git a/xcm/xcm-executor/src/assets.rs b/xcm/xcm-executor/src/assets.rs
index f5e0659931eb..d8d8936df331 100644
--- a/xcm/xcm-executor/src/assets.rs
+++ b/xcm/xcm-executor/src/assets.rs
@@ -132,15 +132,17 @@ impl Assets {
 
 	/// Mutate `self` to contain all given `assets`, saturating if necessary.
 	///
-	/// NOTE: [`Assets`] are always sorted, allowing us to optimize this function from `O(n^2)` to `O(n)`.
+	/// NOTE: [`Assets`] are always sorted, allowing us to optimize this function from `O(n^2)` to
+	/// `O(n)`.
 	pub fn subsume_assets(&mut self, mut assets: Assets) {
 		let mut f_iter = assets.fungible.iter_mut();
 		let mut g_iter = self.fungible.iter_mut();
 		if let (Some(mut f), Some(mut g)) = (f_iter.next(), g_iter.next()) {
 			loop {
 				if f.0 == g.0 {
-					// keys are equal. in this case, we add `self`'s balance for the asset onto `assets`, balance, knowing
-					// that the `append` operation which follows will clobber `self`'s value and only use `assets`'s.
+					// keys are equal. in this case, we add `self`'s balance for the asset onto
+					// `assets`' balance, knowing that the `append` operation which follows will
+					// clobber `self`'s value and only use `assets`'s.
 					(*f.1).saturating_accrue(*g.1);
 				}
 				if f.0 <= g.0 {
@@ -186,8 +188,9 @@ impl Assets {
 
 	/// Alter any concretely identified assets by prepending the given `MultiLocation`.
 	///
-	/// WARNING: For now we consider this infallible and swallow any errors. It is thus the caller's responsibility to
-	/// ensure that any internal asset IDs are able to be prepended without overflow.
+	/// WARNING: For now we consider this infallible and swallow any errors. It is thus the caller's
+	/// responsibility to ensure that any internal asset IDs are able to be prepended without
+	/// overflow.
 	pub fn prepend_location(&mut self, prepend: &MultiLocation) {
 		let mut fungible = Default::default();
 		mem::swap(&mut self.fungible, &mut fungible);
@@ -269,8 +272,8 @@ impl Assets {
 			self.non_fungible.is_superset(&assets.non_fungible)
 	}
 
-	/// Returns an error unless all `assets` are contained in `self`. In the case of an error, the first asset in
-	/// `assets` which is not wholly in `self` is returned.
+	/// Returns an error unless all `assets` are contained in `self`. In the case of an error, the
+	/// first asset in `assets` which is not wholly in `self` is returned.
 	pub fn ensure_contains(&self, assets: &MultiAssets) -> Result<(), TakeError> {
 		for asset in assets.inner().iter() {
 			match asset {
@@ -292,16 +295,17 @@ impl Assets {
 
 	/// Mutates `self` to its original value less `mask` and returns assets that were removed.
 	///
-	/// If `saturate` is `true`, then `self` is considered to be masked by `mask`, thereby avoiding any attempt at
-	/// reducing it by assets it does not contain. In this case, the function is infallible. If `saturate` is `false`
-	/// and `mask` references a definite asset which `self` does not contain then an error is returned.
+	/// If `saturate` is `true`, then `self` is considered to be masked by `mask`, thereby avoiding
+	/// any attempt at reducing it by assets it does not contain. In this case, the function is
+	/// infallible. If `saturate` is `false` and `mask` references a definite asset which `self`
+	/// does not contain then an error is returned.
 	///
 	/// The number of unique assets which are removed will respect the `count` parameter in the
 	/// counted wildcard variants.
 	///
-	/// Returns `Ok` with the definite assets token from `self` and mutates `self` to its value minus
-	/// `mask`. Returns `Err` in the non-saturating case where `self` did not contain (enough of) a definite asset to
-	/// be removed.
+	/// Returns `Ok` with the definite assets taken from `self` and mutates `self` to its value
+	/// minus `mask`. Returns `Err` in the non-saturating case where `self` did not contain (enough
+	/// of) a definite asset to be removed.
 	fn general_take(
 		&mut self,
 		mask: MultiAssetFilter,
@@ -386,24 +390,27 @@ impl Assets {
 		Ok(taken)
 	}
 
-	/// Mutates `self` to its original value less `mask` and returns `true` iff it contains at least `mask`.
+	/// Mutates `self` to its original value less `mask` and returns `true` iff it contains at least
+	/// `mask`.
 	///
-	/// Returns `Ok` with the non-wildcard equivalence of `mask` taken and mutates `self` to its value minus
-	/// `mask` if `self` contains `asset`, and return `Err` otherwise.
+	/// Returns `Ok` with the non-wildcard equivalence of `mask` taken and mutates `self` to its
+	/// value minus `mask` if `self` contains `asset`, and return `Err` otherwise.
 	pub fn saturating_take(&mut self, asset: MultiAssetFilter) -> Assets {
 		self.general_take(asset, true)
 			.expect("general_take never results in error when saturating")
 	}
 
-	/// Mutates `self` to its original value less `mask` and returns `true` iff it contains at least `mask`.
+	/// Mutates `self` to its original value less `mask` and returns `true` iff it contains at least
+	/// `mask`.
 	///
-	/// Returns `Ok` with the non-wildcard equivalence of `asset` taken and mutates `self` to its value minus
-	/// `asset` if `self` contains `asset`, and return `Err` otherwise.
+	/// Returns `Ok` with the non-wildcard equivalence of `asset` taken and mutates `self` to its
+	/// value minus `asset` if `self` contains `asset`, and return `Err` otherwise.
 	pub fn try_take(&mut self, mask: MultiAssetFilter) -> Result<Assets, TakeError> {
 		self.general_take(mask, false)
 	}
 
-	/// Consumes `self` and returns its original value excluding `asset` iff it contains at least `asset`.
+	/// Consumes `self` and returns its original value excluding `asset` iff it contains at least
+	/// `asset`.
 	pub fn checked_sub(mut self, asset: MultiAsset) -> Result<Assets, Assets> {
 		match asset.fun {
 			Fungible(amount) => {
diff --git a/xcm/xcm-executor/src/lib.rs b/xcm/xcm-executor/src/lib.rs
index 57ddc4322923..a48cd3259d67 100644
--- a/xcm/xcm-executor/src/lib.rs
+++ b/xcm/xcm-executor/src/lib.rs
@@ -356,7 +356,8 @@ impl<Config: config::Config> XcmExecutor<Config> {
 	}
 
 	/// Execute any final operations after having executed the XCM message.
-	/// This includes refunding surplus weight, trapping extra holding funds, and returning any errors during execution.
+	/// This includes refunding surplus weight, trapping extra holding funds, and returning any
+	/// errors during execution.
 	pub fn post_process(mut self, xcm_weight: Weight) -> Outcome {
 		// We silently drop any error from our attempt to refund the surplus as it's a charitable
 		// thing so best-effort is all we will do.
@@ -533,9 +534,10 @@ impl<Config: config::Config> XcmExecutor<Config> {
 						Config::IsTeleporter::contains(asset, &origin),
 						XcmError::UntrustedTeleportLocation
 					);
-					// We should check that the asset can actually be teleported in (for this to be in error, there
-					// would need to be an accounting violation by one of the trusted chains, so it's unlikely, but we
-					// don't want to punish a possibly innocent chain/user).
+					// We should check that the asset can actually be teleported in (for this to be
+					// in error, there would need to be an accounting violation by one of the
+					// trusted chains, so it's unlikely, but we don't want to punish a possibly
+					// innocent chain/user).
 					Config::AssetTransactor::can_check_in(&origin, asset, &self.context)?;
 				}
 				for asset in assets.into_inner().into_iter() {
@@ -603,8 +605,8 @@ impl<Config: config::Config> XcmExecutor<Config> {
 				Ok(())
 			},
 			ReportError(response_info) => {
-				// Report the given result by sending a QueryResponse XCM to a previously given outcome
-				// destination if one was registered.
+				// Report the given result by sending a QueryResponse XCM to a previously given
+				// outcome destination if one was registered.
 				self.respond(
 					self.cloned_origin(),
 					Response::ExecutionResult(self.error),
@@ -823,10 +825,12 @@ impl<Config: config::Config> XcmExecutor<Config> {
 				Ok(())
 			},
 			ExportMessage { network, destination, xcm } => {
-				// The actual message sent to the bridge for forwarding is prepended with `UniversalOrigin`
-				// and `DescendOrigin` in order to ensure that the message is executed with this Origin.
+				// The actual message sent to the bridge for forwarding is prepended with
+				// `UniversalOrigin` and `DescendOrigin` in order to ensure that the message is
+				// executed with this Origin.
 				//
-				// Prepend the desired message with instructions which effectively rewrite the origin.
+				// Prepend the desired message with instructions which effectively rewrite the
+				// origin.
 				//
 				// This only works because the remote chain empowers the bridge
 				// to speak for the local network.
diff --git a/xcm/xcm-executor/src/traits/asset_exchange.rs b/xcm/xcm-executor/src/traits/asset_exchange.rs
index 465468992ae4..0cb188d348de 100644
--- a/xcm/xcm-executor/src/traits/asset_exchange.rs
+++ b/xcm/xcm-executor/src/traits/asset_exchange.rs
@@ -24,8 +24,8 @@ pub trait AssetExchange {
 	/// - `origin`: The location attempting the exchange; this should generally not matter.
 	/// - `give`: The assets which have been removed from the caller.
 	/// - `want`: The minimum amount of assets which should be given to the caller in case any
-	///   exchange happens. If more assets are provided, then they should generally be of the
-	///   same asset class if at all possible.
+	///   exchange happens. If more assets are provided, then they should generally be of the same
+	///   asset class if at all possible.
 	/// - `maximal`: If `true`, then as much as possible should be exchanged.
 	///
 	/// `Ok` is returned along with the new set of assets which have been exchanged for `give`. At
diff --git a/xcm/xcm-executor/src/traits/asset_lock.rs b/xcm/xcm-executor/src/traits/asset_lock.rs
index bb19e90b0c36..b5a2b22f5fc5 100644
--- a/xcm/xcm-executor/src/traits/asset_lock.rs
+++ b/xcm/xcm-executor/src/traits/asset_lock.rs
@@ -69,8 +69,8 @@ pub trait AssetLock {
 	/// unlock.
 	type UnlockTicket: Enact;
 
-	/// `Enact` implementer for `prepare_reduce_unlockable`. This type may be dropped safely to avoid doing the
-	/// unlock.
+	/// `Enact` implementer for `prepare_reduce_unlockable`. This type may be dropped safely to
+	/// avoid doing the unlock.
 	type ReduceTicket: Enact;
 
 	/// Prepare to lock an asset. On success, a `Self::LockTicket` it returned, which can be used
diff --git a/xcm/xcm-executor/src/traits/conversion.rs b/xcm/xcm-executor/src/traits/conversion.rs
index 2f584a900f69..dac099ffaf8e 100644
--- a/xcm/xcm-executor/src/traits/conversion.rs
+++ b/xcm/xcm-executor/src/traits/conversion.rs
@@ -40,9 +40,9 @@ impl<AccountId> ConvertLocation<AccountId> for Tuple {
 
 /// A converter `trait` for origin types.
 ///
-/// Can be amalgamated into tuples. If any of the tuple elements returns `Ok(_)`, it short circuits. Else, the `Err(_)`
-/// of the last tuple item is returned. Each intermediate `Err(_)` might return a different `origin` of type `Origin`
-/// which is passed to the next convert item.
+/// Can be amalgamated into tuples. If any of the tuple elements returns `Ok(_)`, it short circuits.
+/// Else, the `Err(_)` of the last tuple item is returned. Each intermediate `Err(_)` might return a
+/// different `origin` of type `Origin` which is passed to the next convert item.
 ///
 /// ```rust
 /// # use xcm::latest::{MultiLocation, Junctions, Junction, OriginKind};
diff --git a/xcm/xcm-executor/src/traits/filter_asset_location.rs b/xcm/xcm-executor/src/traits/filter_asset_location.rs
index 7aeb26b28094..b162a8b0729d 100644
--- a/xcm/xcm-executor/src/traits/filter_asset_location.rs
+++ b/xcm/xcm-executor/src/traits/filter_asset_location.rs
@@ -19,7 +19,8 @@ use xcm::latest::{MultiAsset, MultiLocation};
 
 /// Filters assets/location pairs.
 ///
-/// Can be amalgamated into tuples. If any item returns `true`, it short-circuits, else `false` is returned.
+/// Can be amalgamated into tuples. If any item returns `true`, it short-circuits, else `false` is
+/// returned.
 #[deprecated = "Use `frame_support::traits::ContainsPair<MultiAsset, MultiLocation>` instead"]
 pub trait FilterAssetLocation {
 	/// A filter to distinguish between asset/location pairs.
diff --git a/xcm/xcm-executor/src/traits/on_response.rs b/xcm/xcm-executor/src/traits/on_response.rs
index 34bb7eb9597d..b0f8b35bb98f 100644
--- a/xcm/xcm-executor/src/traits/on_response.rs
+++ b/xcm/xcm-executor/src/traits/on_response.rs
@@ -107,11 +107,14 @@ impl VersionChangeNotifier for () {
 /// The possible state of an XCM query response.
 #[derive(Debug, PartialEq, Eq)]
 pub enum QueryResponseStatus<BlockNumber> {
-	/// The response has arrived, and includes the inner Response and the block number it arrived at.
+	/// The response has arrived, and includes the inner Response and the block number it arrived
+	/// at.
 	Ready { response: Response, at: BlockNumber },
-	/// The response has not yet arrived, the XCM might still be executing or the response might be in transit.
+	/// The response has not yet arrived, the XCM might still be executing or the response might be
+	/// in transit.
 	Pending { timeout: BlockNumber },
-	/// No response with the given `QueryId` was found, or the response was already queried and removed from local storage.
+	/// No response with the given `QueryId` was found, or the response was already queried and
+	/// removed from local storage.
 	NotFound,
 	/// Got an unexpected XCM version.
 	UnexpectedVersion,
@@ -144,7 +147,8 @@ pub trait QueryHandler {
 	///
 	/// - `message`: The message whose outcome should be reported.
 	/// - `responder`: The origin from which a response should be expected.
-	/// - `timeout`: The block number after which it is permissible to return `NotFound` from `take_response`.
+	/// - `timeout`: The block number after which it is permissible to return `NotFound` from
+	///   `take_response`.
 	///
 	/// `report_outcome` may return an error if the `responder` is not invertible.
 	///
diff --git a/xcm/xcm-executor/src/traits/should_execute.rs b/xcm/xcm-executor/src/traits/should_execute.rs
index 2b634e375136..d85458b54709 100644
--- a/xcm/xcm-executor/src/traits/should_execute.rs
+++ b/xcm/xcm-executor/src/traits/should_execute.rs
@@ -32,8 +32,8 @@ pub struct Properties {
 
 /// Trait to determine whether the execution engine should actually execute a given XCM.
 ///
-/// Can be amalgamated into a tuple to have multiple trials. If any of the tuple elements returns `Ok()`, the
-/// execution stops. Else, `Err(_)` is returned if all elements reject the message.
+/// Can be amalgamated into a tuple to have multiple trials. If any of the tuple elements returns
+/// `Ok()`, the execution stops. Else, `Err(_)` is returned if all elements reject the message.
 pub trait ShouldExecute {
 	/// Returns `true` if the given `message` may be executed.
 	///
diff --git a/xcm/xcm-executor/src/traits/transact_asset.rs b/xcm/xcm-executor/src/traits/transact_asset.rs
index 832397a0fd25..34cdb0c71413 100644
--- a/xcm/xcm-executor/src/traits/transact_asset.rs
+++ b/xcm/xcm-executor/src/traits/transact_asset.rs
@@ -20,11 +20,13 @@ use xcm::latest::{Error as XcmError, MultiAsset, MultiLocation, Result as XcmRes
 
 /// Facility for asset transacting.
 ///
-/// This should work with as many asset/location combinations as possible. Locations to support may include non-account
-/// locations such as a `MultiLocation::X1(Junction::Parachain)`. Different chains may handle them in different ways.
+/// This should work with as many asset/location combinations as possible. Locations to support may
+/// include non-account locations such as a `MultiLocation::X1(Junction::Parachain)`. Different
+/// chains may handle them in different ways.
 ///
-/// Can be amalgamated as a tuple of items that implement this trait. In such executions, if any of the transactors
-/// returns `Ok(())`, then it will short circuit. Else, execution is passed to the next transactor.
+/// Can be amalgamated as a tuple of items that implement this trait. In such executions, if any of
+/// the transactors returns `Ok(())`, then it will short circuit. Else, execution is passed to the
+/// next transactor.
 pub trait TransactAsset {
 	/// Ensure that `check_in` will do as expected.
 	///
@@ -37,19 +39,23 @@ pub trait TransactAsset {
 		Err(XcmError::Unimplemented)
 	}
 
-	/// An asset has been teleported in from the given origin. This should do whatever housekeeping is needed.
+	/// An asset has been teleported in from the given origin. This should do whatever housekeeping
+	/// is needed.
 	///
-	/// NOTE: This will make only a best-effort at bookkeeping. The caller should ensure that `can_check_in` has
-	/// returned with `Ok` in order to guarantee that this operation proceeds properly.
+	/// NOTE: This will make only a best-effort at bookkeeping. The caller should ensure that
+	/// `can_check_in` has returned with `Ok` in order to guarantee that this operation proceeds
+	/// properly.
 	///
-	/// Implementation note: In general this will do one of two things: On chains where the asset is native,
-	/// it will reduce the assets from a special "teleported" account so that a) total-issuance is preserved;
-	/// and b) to ensure that no more assets can be teleported in than were teleported out overall (this should
-	/// not be needed if the teleporting chains are to be trusted, but better to be safe than sorry). On chains
-	/// where the asset is not native then it will generally just be a no-op.
+	/// Implementation note: In general this will do one of two things: On chains where the asset is
+	/// native, it will reduce the assets from a special "teleported" account so that a)
+	/// total-issuance is preserved; and b) to ensure that no more assets can be teleported in than
+	/// were teleported out overall (this should not be needed if the teleporting chains are to be
+	/// trusted, but better to be safe than sorry). On chains where the asset is not native then it
+	/// will generally just be a no-op.
 	///
-	/// When composed as a tuple, all type-items are called. It is up to the implementer that there exists no
-	/// value for `_what` which can cause side-effects for more than one of the type-items.
+	/// When composed as a tuple, all type-items are called. It is up to the implementer that there
+	/// exists no value for `_what` which can cause side-effects for more than one of the
+	/// type-items.
 	fn check_in(_origin: &MultiLocation, _what: &MultiAsset, _context: &XcmContext) {}
 
 	/// Ensure that `check_out` will do as expected.
@@ -63,16 +69,19 @@ pub trait TransactAsset {
 		Err(XcmError::Unimplemented)
 	}
 
-	/// An asset has been teleported out to the given destination. This should do whatever housekeeping is needed.
+	/// An asset has been teleported out to the given destination. This should do whatever
+	/// housekeeping is needed.
 	///
-	/// Implementation note: In general this will do one of two things: On chains where the asset is native,
-	/// it will increase the assets in a special "teleported" account so that a) total-issuance is preserved; and
-	/// b) to ensure that no more assets can be teleported in than were teleported out overall (this should not
-	/// be needed if the teleporting chains are to be trusted, but better to be safe than sorry). On chains where
-	/// the asset is not native then it will generally just be a no-op.
+	/// Implementation note: In general this will do one of two things: On chains where the asset is
+	/// native, it will increase the assets in a special "teleported" account so that a)
+	/// total-issuance is preserved; and b) to ensure that no more assets can be teleported in than
+	/// were teleported out overall (this should not be needed if the teleporting chains are to be
+	/// trusted, but better to be safe than sorry). On chains where the asset is not native then it
+	/// will generally just be a no-op.
 	///
-	/// When composed as a tuple, all type-items are called. It is up to the implementer that there exists no
-	/// value for `_what` which can cause side-effects for more than one of the type-items.
+	/// When composed as a tuple, all type-items are called. It is up to the implementer that there
+	/// exists no value for `_what` which can cause side-effects for more than one of the
+	/// type-items.
 	fn check_out(_dest: &MultiLocation, _what: &MultiAsset, _context: &XcmContext) {}
 
 	/// Deposit the `what` asset into the account of `who`.
diff --git a/xcm/xcm-executor/src/traits/weight.rs b/xcm/xcm-executor/src/traits/weight.rs
index 06e6b5f55bce..bc40c10074f5 100644
--- a/xcm/xcm-executor/src/traits/weight.rs
+++ b/xcm/xcm-executor/src/traits/weight.rs
@@ -56,8 +56,8 @@ pub trait WeightTrader: Sized {
 		context: &XcmContext,
 	) -> Result<Assets, XcmError>;
 
-	/// Attempt a refund of `weight` into some asset. The caller does not guarantee that the weight was
-	/// purchased using `buy_weight`.
+	/// Attempt a refund of `weight` into some asset. The caller does not guarantee that the weight
+	/// was purchased using `buy_weight`.
 	///
 	/// Default implementation refunds nothing.
 	fn refund_weight(&mut self, _weight: Weight, _context: &XcmContext) -> Option<MultiAsset> {
@@ -93,8 +93,8 @@ impl WeightTrader for Tuple {
 
 		log::trace!(target: "xcm::buy_weight", "last_error: {:?}, too_expensive_error_found: {}", last_error, too_expensive_error_found);
 
-		// if we have multiple traders, and first one returns `TooExpensive` and others fail e.g. `AssetNotFound`
-		// then it is more accurate to return `TooExpensive` then `AssetNotFound`
+		// if we have multiple traders, and first one returns `TooExpensive` and others fail e.g.
+		// `AssetNotFound` then it is more accurate to return `TooExpensive` than `AssetNotFound`
 		Err(if too_expensive_error_found {
 			XcmError::TooExpensive
 		} else {
diff --git a/xcm/xcm-simulator/src/lib.rs b/xcm/xcm-simulator/src/lib.rs
index f98eb6e571e6..cf56784f7d4e 100644
--- a/xcm/xcm-simulator/src/lib.rs
+++ b/xcm/xcm-simulator/src/lib.rs
@@ -161,12 +161,12 @@ macro_rules! decl_test_relay_chain {
 ///
 /// ```ignore
 /// decl_test_parachain! {
-///	    pub struct ParaA {
-///	        Runtime = parachain::Runtime,
-///	        XcmpMessageHandler = parachain::MsgQueue,
-///	        DmpMessageHandler = parachain::MsgQueue,
-///	        new_ext = para_ext(),
-///	    }
+/// 	    pub struct ParaA {
+/// 	        Runtime = parachain::Runtime,
+/// 	        XcmpMessageHandler = parachain::MsgQueue,
+/// 	        DmpMessageHandler = parachain::MsgQueue,
+/// 	        new_ext = para_ext(),
+/// 	    }
 /// }
 /// ```
 #[macro_export]
@@ -272,13 +272,13 @@ thread_local! {
 ///
 /// ```ignore
 /// decl_test_network! {
-///	    pub struct ExampleNet {
-///	        relay_chain = Relay,
-///	        parachains = vec![
-///	            (1, ParaA),
-///	            (2, ParaB),
-///	        ],
-///	    }
+/// 	    pub struct ExampleNet {
+/// 	        relay_chain = Relay,
+/// 	        parachains = vec![
+/// 	            (1, ParaA),
+/// 	            (2, ParaB),
+/// 	        ],
+/// 	    }
 /// }
 /// ```
 #[macro_export]