diff --git a/Cargo.lock b/Cargo.lock index 439e4052adff..1f565739611e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1392,10 +1392,10 @@ version = "13.0.0" dependencies = [ "array-bytes 6.1.0", "env_logger 0.9.3", - "hash-db", "log", "sp-core", "sp-runtime", + "subtrie", ] [[package]] @@ -1851,7 +1851,6 @@ version = "0.7.0" dependencies = [ "frame-support", "frame-system", - "hash-db", "hex-literal", "impl-trait-for-tuples", "log", @@ -1865,7 +1864,7 @@ dependencies = [ "sp-state-machine", "sp-std 14.0.0", "sp-trie", - "trie-db", + "subtrie", ] [[package]] @@ -2252,7 +2251,6 @@ dependencies = [ "bp-xcm-bridge-hub-router", "frame-support", "frame-system", - "hash-db", "log", "pallet-balances", "pallet-bridge-grandpa", @@ -2272,6 +2270,7 @@ dependencies = [ "staging-xcm", "staging-xcm-builder", "static_assertions", + "subtrie", ] [[package]] @@ -3882,8 +3881,7 @@ dependencies = [ "sp-trie", "sp-version", "staging-xcm", - "trie-db", - "trie-standardmap", + "subtrie", ] [[package]] @@ -6281,16 +6279,18 @@ dependencies = [ ] [[package]] -name = "hash-db" -version = "0.16.0" +name = "hash256-std-hasher" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e7d7786361d7425ae2fe4f9e407eb0efaa0840f5212d109cc018c40c35c6ab4" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +source = "git+https://github.com/paritytech/subtrie.git?branch=master#61c6261ab9a0dc1314a22e012fbc8f5c61f88c97" dependencies = [ "crunchy", ] @@ -7012,17 +7012,6 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "keccak-hasher" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ea4653859ca2266a86419d3f592d3f22e7a854b482f99180d2498507902048" -dependencies = [ - "hash-db", - "hash256-std-hasher", - "tiny-keccak", -] - [[package]] name = "keystream" version = "1.0.0" @@ -8069,15 +8058,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memory-db" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808b50db46293432a45e63bc15ea51e0ab4c0a1647b8eb114e31a3e698dd6fbe" -dependencies = [ - "hash-db", -] - [[package]] name = "merlin" version = "3.0.0" @@ -8590,7 +8570,6 @@ dependencies = [ "derive_more", "fs_extra", "futures", - "hash-db", "kitchensink-runtime", "kvdb", "kvdb-rocksdb", @@ -8602,6 +8581,7 @@ dependencies = [ "rand", "sc-basic-authorship", "sc-client-api", + "sc-client-db", "sc-transaction-pool", "sc-transaction-pool-api", "serde", @@ -8614,6 +8594,7 @@ dependencies = [ "sp-timestamp", "sp-tracing 16.0.0", "sp-trie", + "subtrie", "tempfile", ] @@ -11448,9 +11429,8 @@ checksum = "16b56e3a2420138bdb970f84dfb9c774aea80fa0e7371549eedec0d80c209c67" [[package]] name = "parity-db" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59e9ab494af9e6e813c72170f0d3c1de1500990d62c97cc05cc7576f91aa402f" +version = "0.4.13" +source = "git+https://github.com/paritytech/parity-db.git?branch=master#a1982234fca6bea37dcc19a9d3744919f736c288" dependencies = [ "blake2 0.10.6", "crc32fast", @@ -11464,6 +11444,7 @@ dependencies = [ "rand", "siphasher", "snap", + "winapi", ] [[package]] @@ -15764,7 +15745,7 @@ version = "0.35.0" dependencies = [ "array-bytes 6.1.0", "criterion 0.4.0", - 
"hash-db", + "fs_extra", "kitchensink-runtime", "kvdb", "kvdb-memorydb", @@ -15787,7 +15768,7 @@ dependencies = [ "sp-state-machine", "sp-tracing 16.0.0", "sp-trie", - "substrate-test-runtime-client", + "subtrie", "tempfile", ] @@ -18346,7 +18327,6 @@ dependencies = [ name = "sp-api" version = "26.0.0" dependencies = [ - "hash-db", "log", "parity-scale-codec", "scale-info", @@ -18361,6 +18341,7 @@ dependencies = [ "sp-test-primitives", "sp-trie", "sp-version", + "subtrie", "thiserror", ] @@ -18648,8 +18629,7 @@ dependencies = [ "dyn-clonable", "ed25519-zebra 3.1.0", "futures", - "hash-db", - "hash256-std-hasher", + "hash256-std-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "impl-serde", "itertools 0.10.5", "k256", @@ -18678,6 +18658,7 @@ dependencies = [ "sp-storage 19.0.0", "ss58-registry", "substrate-bip39", + "subtrie", "thiserror", "tracing", "w3f-bls", @@ -18777,7 +18758,9 @@ name = "sp-database" version = "10.0.0" dependencies = [ "kvdb", + "parity-db", "parking_lot 0.12.1", + "subtrie", ] [[package]] @@ -18997,7 +18980,7 @@ version = "31.0.1" dependencies = [ "docify 0.2.7", "either", - "hash256-std-hasher", + "hash256-std-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "impl-trait-for-tuples", "log", "parity-scale-codec", @@ -19157,7 +19140,6 @@ version = "0.35.0" dependencies = [ "array-bytes 6.1.0", "assert_matches", - "hash-db", "log", "parity-scale-codec", "parking_lot 0.12.1", @@ -19170,9 +19152,9 @@ dependencies = [ "sp-runtime", "sp-std 14.0.0", "sp-trie", + "subtrie", "thiserror", "tracing", - "trie-db", ] [[package]] @@ -19309,10 +19291,8 @@ version = "29.0.0" dependencies = [ "ahash 0.8.8", "array-bytes 6.1.0", - "criterion 0.4.0", - "hash-db", + "criterion 0.5.1", "lazy_static", - "memory-db", "nohash-hasher", "parity-scale-codec", "parking_lot 0.12.1", @@ -19323,12 +19303,9 @@ dependencies = [ "sp-externalities 0.25.0", "sp-runtime", "sp-std 14.0.0", + "subtrie", "thiserror", "tracing", - "trie-bench", - "trie-db", - "trie-root", - "trie-standardmap", ] [[package]] @@ -19936,7 +19913,7 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-trie", - "trie-db", + "subtrie", ] [[package]] @@ -20013,7 +19990,7 @@ dependencies = [ "sp-version", "substrate-test-runtime-client", "substrate-wasm-builder", - "trie-db", + "subtrie", ] [[package]] @@ -20094,6 +20071,20 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" +[[package]] +name = "subtrie" +version = "0.0.1" +source = "git+https://github.com/paritytech/subtrie.git?branch=master#61c6261ab9a0dc1314a22e012fbc8f5c61f88c97" +dependencies = [ + "criterion 0.5.1", + "hash256-std-hasher 0.15.2 (git+https://github.com/paritytech/subtrie.git?branch=master)", + "log", + "parity-scale-codec", + "rustc-hex", + "smallvec", + "tiny-keccak", +] + [[package]] name = "sval" version = "2.6.1" @@ -20967,54 +20958,6 @@ dependencies = [ "tracing-log 0.2.0", ] -[[package]] -name = "trie-bench" -version = "0.38.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4680cb226e31d2a096592d0edecdda91cc371743002f80c0f8cf80219819b3b" -dependencies = [ - "criterion 0.4.0", - "hash-db", - "keccak-hasher", - "memory-db", - "parity-scale-codec", - "trie-db", - "trie-root", - "trie-standardmap", -] - -[[package]] -name = "trie-db" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ff28e0f815c2fea41ebddf148e008b077d2faddb026c9555b29696114d602642" -dependencies = [ - "hash-db", - "hashbrown 0.13.2", - "log", - "rustc-hex", - "smallvec", -] - -[[package]] -name = "trie-root" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4ed310ef5ab98f5fa467900ed906cb9232dd5376597e00fd4cba2a449d06c0b" -dependencies = [ - "hash-db", -] - -[[package]] -name = "trie-standardmap" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "684aafb332fae6f83d7fe10b3fbfdbe39a1b3234c4e2a618f030815838519516" -dependencies = [ - "hash-db", - "keccak-hasher", -] - [[package]] name = "trust-dns-proto" version = "0.22.0" diff --git a/Cargo.toml b/Cargo.toml index f256d02808a8..19c21cc060b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -600,7 +600,6 @@ curve25519-dalek = { opt-level = 3 } ed25519-dalek = { opt-level = 3 } flate2 = { opt-level = 3 } futures-channel = { opt-level = 3 } -hash-db = { opt-level = 3 } hashbrown = { opt-level = 3 } hmac = { opt-level = 3 } httparse = { opt-level = 3 } @@ -632,3 +631,7 @@ wasmi = { opt-level = 3 } x25519-dalek = { opt-level = 3 } yamux = { opt-level = 3 } zeroize = { opt-level = 3 } + +[patch.crates-io] +subtrie = { git = "https://github.com/paritytech/subtrie.git", branch = "master" } +parity-db = { git = "https://github.com/paritytech/parity-db.git", branch = "master" } diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index fac88b20ca57..46068b0331cd 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } -hash-db = { version = "0.16.0", default-features = false } +trie-db = { package = "subtrie", version = "0.0.1", default-features = false } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } static_assertions = { version = "1.1", optional = true } @@ -67,7 +67,6 @@ std = [ "codec/std", "frame-support/std", "frame-system/std", - "hash-db/std", "log/std", "pallet-bridge-grandpa/std", "pallet-bridge-messages/std", @@ -82,6 +81,7 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-trie/std", + "trie-db/std", "xcm-builder/std", "xcm/std", ] diff --git a/bridges/bin/runtime-common/src/messages.rs b/bridges/bin/runtime-common/src/messages.rs index 4aca53f3b983..aaf515d67937 100644 --- a/bridges/bin/runtime-common/src/messages.rs +++ b/bridges/bin/runtime-common/src/messages.rs @@ -32,10 +32,10 @@ use bp_messages::{ use bp_runtime::{Chain, RawStorageProof, Size, StorageProofChecker}; use codec::{Decode, Encode}; use frame_support::{traits::Get, weights::Weight}; -use hash_db::Hasher; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; use sp_std::{convert::TryFrom, marker::PhantomData, vec::Vec}; +use trie_db::node_db::Hasher; /// Bidirectional message bridge. 
 pub trait MessageBridge {
diff --git a/bridges/bin/runtime-common/src/messages_generation.rs b/bridges/bin/runtime-common/src/messages_generation.rs
index c37aaa5d4d53..bb31b0e3864f 100644
--- a/bridges/bin/runtime-common/src/messages_generation.rs
+++ b/bridges/bin/runtime-common/src/messages_generation.rs
@@ -25,7 +25,7 @@ use bp_messages::{
 use bp_runtime::{record_all_trie_keys, RawStorageProof, StorageProofSize};
 use codec::Encode;
 use sp_std::{ops::RangeInclusive, prelude::*};
-use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut};
+use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB};
 
 /// Simple and correct message data encode function.
 pub fn encode_all_messages(_: MessageNonce, m: &MessagePayload) -> Option<Vec<u8>> {
@@ -56,11 +56,9 @@ where
	// prepare Bridged chain storage with messages and (optionally) outbound lane state
	let message_count = message_nonces.end().saturating_sub(*message_nonces.start()) + 1;
	let mut storage_keys = Vec::with_capacity(message_count as usize + 1);
-	let mut root = Default::default();
	let mut mdb = MemoryDB::default();
-	{
-		let mut trie =
-			TrieDBMutBuilderV1::<HasherOf<BridgedChain<B>>>::new(&mut mdb, &mut root).build();
+	let root = {
+		let mut trie = TrieDBMutBuilderV1::<HasherOf<BridgedChain<B>>>::new(&mdb).build();
 
		// insert messages
		for (i, nonce) in message_nonces.into_iter().enumerate() {
@@ -96,12 +94,18 @@ where
			.expect("TrieMut::insert should not fail in benchmarks");
			storage_keys.push(storage_key);
		}
-	}
+
+		let changeset = trie.commit();
+		let root = changeset.root_hash();
+		changeset.apply_to(&mut mdb);
+		root
+	};
 
	// generate storage proof to be delivered to This chain
-	let storage_proof = record_all_trie_keys::<LayoutV1<HasherOf<BridgedChain<B>>>, _>(&mdb, &root)
-		.map_err(|_| "record_all_trie_keys has failed")
-		.expect("record_all_trie_keys should not fail in benchmarks");
+	let storage_proof =
+		record_all_trie_keys::<LayoutV1<HasherOf<BridgedChain<B>>, ()>>(&mdb, &root)
+			.map_err(|_| "record_all_trie_keys has failed")
+			.expect("record_all_trie_keys should not fail in benchmarks");
 
	(root, storage_proof)
 }
@@ -118,21 +122,20 @@ where
 {
	// prepare Bridged chain storage with inbound lane state
	let storage_key = storage_keys::inbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, &lane).0;
-	let mut root = Default::default();
	let mut mdb = MemoryDB::default();
-	{
-		let mut trie =
-			TrieDBMutBuilderV1::<HasherOf<BridgedChain<B>>>::new(&mut mdb, &mut root).build();
-		let inbound_lane_data = grow_trie_leaf_value(inbound_lane_data.encode(), size);
-		trie.insert(&storage_key, &inbound_lane_data)
-			.map_err(|_| "TrieMut::insert has failed")
-			.expect("TrieMut::insert should not fail in benchmarks");
-	}
+	let mut trie = TrieDBMutBuilderV1::<HasherOf<BridgedChain<B>>>::new(&mut mdb).build();
+	let inbound_lane_data = grow_trie_leaf_value(inbound_lane_data.encode(), size);
+	trie.insert(&storage_key, &inbound_lane_data)
+		.map_err(|_| "TrieMut::insert has failed")
+		.expect("TrieMut::insert should not fail in benchmarks");
+	let changeset = trie.commit();
+	let root = changeset.apply_to(&mut mdb);
 
	// generate storage proof to be delivered to This chain
-	let storage_proof = record_all_trie_keys::<LayoutV1<HasherOf<BridgedChain<B>>>, _>(&mdb, &root)
-		.map_err(|_| "record_all_trie_keys has failed")
-		.expect("record_all_trie_keys should not fail in benchmarks");
+	let storage_proof =
+		record_all_trie_keys::<LayoutV1<HasherOf<BridgedChain<B>>, ()>>(&mdb, &root)
+			.map_err(|_| "record_all_trie_keys has failed")
+			.expect("record_all_trie_keys should not fail in benchmarks");
 
	(root, storage_proof)
 }
diff --git a/bridges/bin/runtime-common/src/parachains_benchmarking.rs b/bridges/bin/runtime-common/src/parachains_benchmarking.rs
index b3050b9ac0f3..05f66798b6ee 100644
--- a/bridges/bin/runtime-common/src/parachains_benchmarking.rs
+++ b/bridges/bin/runtime-common/src/parachains_benchmarking.rs
@@ -30,7 +30,7 @@
 use codec::Encode;
 use frame_support::traits::Get;
 use pallet_bridge_parachains::{RelayBlockHash, RelayBlockHasher, RelayBlockNumber};
 use sp_std::prelude::*;
-use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut};
+use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB};
 
 /// Prepare proof of messages for the `receive_messages_proof` call.
 ///
@@ -53,31 +53,29 @@ where
	// insert all heads to the trie
	let mut parachain_heads = Vec::with_capacity(parachains.len());
	let mut storage_keys = Vec::with_capacity(parachains.len());
-	let mut state_root = Default::default();
	let mut mdb = MemoryDB::default();
-	{
-		let mut trie =
-			TrieDBMutBuilderV1::<RelayBlockHasher>::new(&mut mdb, &mut state_root).build();
+	let mut trie = TrieDBMutBuilderV1::<RelayBlockHasher>::new(&mdb).build();
 
-		// insert parachain heads
-		for (i, parachain) in parachains.into_iter().enumerate() {
-			let storage_key =
-				parachain_head_storage_key_at_source(R::ParasPalletName::get(), *parachain);
-			let leaf_data = if i == 0 {
-				grow_trie_leaf_value(parachain_head.encode(), size)
-			} else {
-				parachain_head.encode()
-			};
-			trie.insert(&storage_key.0, &leaf_data)
-				.map_err(|_| "TrieMut::insert has failed")
-				.expect("TrieMut::insert should not fail in benchmarks");
-			storage_keys.push(storage_key);
-			parachain_heads.push((*parachain, parachain_head.hash()))
-		}
+	// insert parachain heads
+	for (i, parachain) in parachains.into_iter().enumerate() {
+		let storage_key =
+			parachain_head_storage_key_at_source(R::ParasPalletName::get(), *parachain);
+		let leaf_data = if i == 0 {
+			grow_trie_leaf_value(parachain_head.encode(), size)
+		} else {
+			parachain_head.encode()
+		};
+		trie.insert(&storage_key.0, &leaf_data)
+			.map_err(|_| "TrieMut::insert has failed")
+			.expect("TrieMut::insert should not fail in benchmarks");
+		storage_keys.push(storage_key);
+		parachain_heads.push((*parachain, parachain_head.hash()))
	}
 
+	let state_root = trie.commit().apply_to(&mut mdb);
+
	// generate heads storage proof
-	let proof = record_all_trie_keys::<LayoutV1<RelayBlockHasher>, _>(&mdb, &state_root)
+	let proof = record_all_trie_keys::<LayoutV1<RelayBlockHasher, ()>>(&mdb, &state_root)
		.map_err(|_| "record_all_trie_keys has failed")
		.expect("record_all_trie_keys should not fail in benchmarks");
diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml
index 22206fb2c376..bd80e2677d5c 100644
--- a/bridges/primitives/runtime/Cargo.toml
+++ b/bridges/primitives/runtime/Cargo.toml
@@ -11,7 +11,7 @@ workspace = true
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false }
-hash-db = { version = "0.16.0", default-features = false }
+trie-db = { package = "subtrie", version = "0.0.1", default-features = false }
 impl-trait-for-tuples = "0.2.2"
 log = { workspace = true }
 num-traits = { version = "0.2", default-features = false }
@@ -28,7 +28,6 @@
 sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false }
 sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false }
 sp-std = { path = "../../../substrate/primitives/std", default-features = false }
 sp-trie = { path = "../../../substrate/primitives/trie", default-features = false }
-trie-db = { version = "0.28.0", default-features = false }
 
 [dev-dependencies]
 hex-literal = "0.4"
@@ -39,7 +38,6 @@ std = [
	"codec/std",
	"frame-support/std",
	"frame-system/std",
-	"hash-db/std",
	"log/std",
	"num-traits/std",
	"scale-info/std",
diff --git a/bridges/primitives/runtime/src/storage_proof.rs b/bridges/primitives/runtime/src/storage_proof.rs
index 1b706aa66c16..bb190a0f5e60 100644
--- a/bridges/primitives/runtime/src/storage_proof.rs
+++ b/bridges/primitives/runtime/src/storage_proof.rs
@@ -19,13 +19,13 @@
 use crate::StrippableError;
 use codec::{Decode, Encode};
 use frame_support::PalletError;
-use hash_db::{HashDB, Hasher, EMPTY_PREFIX};
 use scale_info::TypeInfo;
 use sp_std::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec};
 use sp_trie::{
	read_trie_value, LayoutV1, MemoryDB, Recorder, StorageProof, Trie, TrieConfiguration,
	TrieDBBuilder, TrieError, TrieHash,
 };
+use trie_db::node_db::{Hasher, EMPTY_PREFIX};
 
 /// Raw storage proof type (just raw trie nodes).
 pub type RawStorageProof = Vec<Vec<u8>>;
@@ -54,7 +54,7 @@ where
	proof_nodes_count: usize,
	root: H::Out,
	db: MemoryDB<H>,
-	recorder: Recorder<LayoutV1<H>>,
+	recorder: Recorder<LayoutV1<H, ()>>,
 }
 
 impl<H> StorageProofChecker<H>
 where
	H: Hasher,
@@ -109,8 +109,14 @@ where
	/// incomplete or otherwise invalid proof, this function returns an error.
	pub fn read_value(&mut self, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
		// LayoutV1 or LayoutV0 is identical for proof that only read values.
-		read_trie_value::<LayoutV1<H>, _>(&self.db, &self.root, key, Some(&mut self.recorder), None)
-			.map_err(|_| Error::StorageValueUnavailable)
+		read_trie_value::<LayoutV1<H, ()>, _>(
+			&self.db,
+			&self.root,
+			key,
+			Some(&mut self.recorder),
+			None,
+		)
+		.map_err(|_| Error::StorageValueUnavailable)
	}
 
	/// Reads and decodes a value from the available subset of storage. If the value cannot be read
@@ -180,7 +186,7 @@ pub fn craft_valid_storage_proof() -> (sp_core::H256, RawStorageProof) {
		],
		state_version,
	));
-	let root = backend.storage_root(std::iter::empty(), state_version).0;
+	let root = backend.storage_root(std::iter::empty(), state_version).root_hash();
	let proof =
		prove_read(backend, &[&b"key1"[..], &b"key2"[..], &b"key4"[..], &b"key22"[..]]).unwrap();
 
@@ -188,13 +194,10 @@ pub fn craft_valid_storage_proof() -> (sp_core::H256, RawStorageProof) {
	(root, proof)
 }
 
 /// Record all keys for a given root.
-pub fn record_all_keys<L: TrieConfiguration, DB>(
-	db: &DB,
+pub fn record_all_keys<L: TrieConfiguration>(
+	db: &dyn trie_db::node_db::NodeDB<L::Hash, trie_db::DBValue, L::Location>,
	root: &TrieHash<L>,
-) -> Result<RawStorageProof, Box<TrieError<L>>>
-where
-	DB: hash_db::HashDBRef<L::Hash, trie_db::DBValue>,
-{
+) -> Result<RawStorageProof, Box<TrieError<L>>> {
	let mut recorder = Recorder::<L>::new();
	let trie = TrieDBBuilder::<L>::new(db, root).with_recorder(&mut recorder).build();
	for x in trie.iter()? {
diff --git a/bridges/primitives/test-utils/src/lib.rs b/bridges/primitives/test-utils/src/lib.rs
index 1d80890779bf..a0662979fa33 100644
--- a/bridges/primitives/test-utils/src/lib.rs
+++ b/bridges/primitives/test-utils/src/lib.rs
@@ -27,7 +27,7 @@
 use codec::Encode;
 use sp_consensus_grandpa::{AuthorityId, AuthoritySignature, AuthorityWeight, SetId};
 use sp_runtime::traits::{Header as HeaderT, One, Zero};
 use sp_std::prelude::*;
-use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut};
+use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB};
 
 // Re-export all our test account utilities
 pub use keyring::*;
@@ -175,22 +175,20 @@ pub fn prepare_parachain_heads_proof<H: HeaderT>(
	heads: Vec<(u32, ParaHead)>,
 ) -> (H::Hash, ParaHeadsProof, Vec<(ParaId, ParaHash)>) {
	let mut parachains = Vec::with_capacity(heads.len());
-	let mut root = Default::default();
	let mut mdb = MemoryDB::default();
-	{
-		let mut trie = TrieDBMutBuilderV1::<H::Hashing>::new(&mut mdb, &mut root).build();
-		for (parachain, head) in heads {
-			let storage_key =
-				parachain_head_storage_key_at_source(PARAS_PALLET_NAME, ParaId(parachain));
-			trie.insert(&storage_key.0, &head.encode())
-				.map_err(|_| "TrieMut::insert has failed")
-				.expect("TrieMut::insert should not fail in tests");
-			parachains.push((ParaId(parachain), head.hash()));
-		}
+	let mut trie = TrieDBMutBuilderV1::<H::Hashing>::new(&mdb).build();
+	for (parachain, head) in heads {
+		let storage_key =
+			parachain_head_storage_key_at_source(PARAS_PALLET_NAME, ParaId(parachain));
+		trie.insert(&storage_key.0, &head.encode())
+			.map_err(|_| "TrieMut::insert has failed")
+			.expect("TrieMut::insert should not fail in tests");
+		parachains.push((ParaId(parachain), head.hash()));
	}
+	let root = trie.commit().apply_to(&mut mdb);
 
	// generate storage proof to be delivered to This chain
-	let storage_proof = record_all_trie_keys::<LayoutV1<H::Hashing>, _>(&mdb, &root)
+	let storage_proof = record_all_trie_keys::<LayoutV1<H::Hashing, ()>>(&mdb, &root)
		.map_err(|_| "record_all_trie_keys has failed")
		.expect("record_all_trie_keys should not fail in benchmarks");
diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml
index 7e0442f0b585..631ab68e5a87 100644
--- a/cumulus/pallets/parachain-system/Cargo.toml
+++ b/cumulus/pallets/parachain-system/Cargo.toml
@@ -15,8 +15,8 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features =
 environmental = { version = "1.1.4", default-features = false }
 impl-trait-for-tuples = "0.2.1"
 log = { workspace = true }
-trie-db = { version = "0.28.0", default-features = false }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+trie-db = { package = "subtrie", version = "0.0.1", default-features = false }
 
 # Substrate
 frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true }
@@ -49,7 +49,6 @@ cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-si
 assert_matches = "1.5"
 hex-literal = "0.4.1"
 lazy_static = "1.4"
-trie-standardmap = "0.16.0"
 rand = "0.8.5"
 futures = "0.3.28"
diff --git a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs
index 5519d1521ea6..4d7c166ae19d 100644
--- a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs
+++ b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs
@@ -23,8 +23,8 @@ use cumulus_primitives_core::{
 use scale_info::TypeInfo;
 use sp_runtime::traits::HashingFor;
 use sp_state_machine::{Backend, TrieBackend, TrieBackendBuilder};
-use sp_std::vec::Vec;
-use sp_trie::{HashDBT, MemoryDB, StorageProof, EMPTY_PREFIX};
+use sp_std::{boxed::Box, vec::Vec};
+use sp_trie::{StorageProof, EMPTY_PREFIX};
 
 /// The capacity of the upward message queue of a parachain on the relay chain.
 // The field order should stay the same as the data can be found in the proof to ensure both are
@@ -152,8 +152,7 @@ where
 /// This state proof is extracted from the relay chain block we are building on top of.
 pub struct RelayChainStateProof {
	para_id: ParaId,
-	trie_backend:
-		TrieBackend<MemoryDB<HashingFor<relay_chain::Block>>, HashingFor<relay_chain::Block>>,
+	trie_backend: TrieBackend<HashingFor<relay_chain::Block>>,
 }
 
 impl RelayChainStateProof {
@@ -170,7 +169,7 @@ impl RelayChainStateProof {
		if !db.contains(&relay_parent_storage_root, EMPTY_PREFIX) {
			return Err(Error::RootMismatch)
		}
-		let trie_backend = TrieBackendBuilder::new(db, relay_parent_storage_root).build();
+		let trie_backend = TrieBackendBuilder::new(Box::new(db), relay_parent_storage_root).build();
 
		Ok(Self { para_id, trie_backend })
	}
diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs
index ecab7a9a0931..b30e8ad97d9a 100644
--- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs
+++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs
@@ -34,11 +34,11 @@ use sp_externalities::{set_and_run_with_externalities, Externalities};
 use sp_io::KillStorageResult;
 use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT};
 use sp_std::prelude::*;
-use sp_trie::{MemoryDB, ProofSizeProvider};
+
+use sp_trie::ProofSizeProvider;
 use trie_recorder::SizeOnlyRecorderProvider;
 
 type TrieBackend<B> = sp_state_machine::TrieBackend<
-	MemoryDB<HashingFor<B>>,
	HashingFor<B>,
	trie_cache::CacheProvider<HashingFor<B>>,
	SizeOnlyRecorderProvider<HashingFor<B>>,
 >;
@@ -130,7 +130,7 @@ where
	// We use the storage root of the `parent_head` to ensure that it is the correct root.
	// This is already being done above while creating the in-memory db, but let's be paranoid!!
	let backend = sp_state_machine::TrieBackendBuilder::new_with_cache(
-		db,
+		Box::new(db),
		*parent_header.state_root(),
		cache_provider,
	)
diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs
index 5d785910fbe0..3bc10b27bcd6 100644
--- a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs
+++ b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs
@@ -22,34 +22,36 @@ use sp_std::{
	collections::btree_map::{BTreeMap, Entry},
 };
 use sp_trie::NodeCodec;
-use trie_db::{node::NodeOwned, Hasher};
+use trie_db::{node::NodeOwned, node_db::Hasher};
 
 /// Special purpose trie cache implementation that is able to cache an unlimited number
 /// of values. To be used in `validate_block` to serve values and nodes that
 /// have already been loaded and decoded from the storage proof.
 pub(crate) struct TrieCache<'a, H: Hasher> {
-	node_cache: RefMut<'a, BTreeMap<H::Out, NodeOwned<H::Out>>>,
-	value_cache: Option<RefMut<'a, BTreeMap<Vec<u8>, trie_db::CachedValue<H::Out>>>>,
+	node_cache: RefMut<'a, BTreeMap<H::Out, NodeOwned<H::Out, ()>>>,
+	value_cache: Option<RefMut<'a, BTreeMap<Vec<u8>, trie_db::CachedValue<H::Out, ()>>>>,
 }
 
-impl<'a, H: Hasher> trie_db::TrieCache<NodeCodec<H>> for TrieCache<'a, H> {
-	fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&trie_db::CachedValue<H::Out>> {
+impl<'a, H: Hasher> trie_db::TrieCache<NodeCodec<H>, ()> for TrieCache<'a, H> {
+	fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&trie_db::CachedValue<H::Out, ()>> {
		self.value_cache.as_ref().and_then(|cache| cache.get(key))
	}
 
-	fn cache_value_for_key(&mut self, key: &[u8], value: trie_db::CachedValue<H::Out>) {
+	fn cache_value_for_key(&mut self, key: &[u8], value: trie_db::CachedValue<H::Out, ()>) {
		self.value_cache.as_mut().and_then(|cache| cache.insert(key.into(), value));
	}
 
	fn get_or_insert_node(
		&mut self,
		hash: <NodeCodec<H> as trie_db::NodeCodec>::HashOut,
+		_location: (),
		fetch_node: &mut dyn FnMut() -> trie_db::Result<
-			NodeOwned<H::Out>,
+			NodeOwned<H::Out, ()>,
			H::Out,
			<NodeCodec<H> as trie_db::NodeCodec>::Error,
		>,
-	) -> trie_db::Result<&NodeOwned<H::Out>, H::Out, <NodeCodec<H> as trie_db::NodeCodec>::Error> {
+	) -> trie_db::Result<&NodeOwned<H::Out, ()>, H::Out, <NodeCodec<H> as trie_db::NodeCodec>::Error>
+	{
		match self.node_cache.entry(hash) {
			Entry::Occupied(entry) => Ok(entry.into_mut()),
			Entry::Vacant(entry) => Ok(entry.insert(fetch_node()?)),
@@ -59,21 +61,24 @@ impl<'a, H: Hasher> trie_db::TrieCache<NodeCodec<H>> for TrieCache<'a, H> {
	fn get_node(
		&mut self,
		hash: &H::Out,
-	) -> Option<&NodeOwned<<NodeCodec<H> as trie_db::NodeCodec>::HashOut>> {
+		_location: (),
+	) -> Option<&NodeOwned<<NodeCodec<H> as trie_db::NodeCodec>::HashOut, ()>> {
		self.node_cache.get(hash)
	}
+
+	fn insert_new_node(&mut self, _hash: &H::Out) {}
 }
 
 /// Provider of [`TrieCache`] instances.
 pub(crate) struct CacheProvider<H: Hasher> {
-	node_cache: RefCell<BTreeMap<H::Out, NodeOwned<H::Out>>>,
+	node_cache: RefCell<BTreeMap<H::Out, NodeOwned<H::Out, ()>>>,
	/// Cache: `storage_root` => `storage_key` => `value`.
	///
	/// One `block` can for example use multiple tries (child tries) and we need to distinguish the
	/// cached (`storage_key`, `value`) between them. For this we are using the `storage_root` to
	/// distinguish them (even if the storage root is the same for two child tries, it just means
	/// that both are exactly the same trie and there would happen no collision).
-	value_cache: RefCell<BTreeMap<H::Out, BTreeMap<Vec<u8>, trie_db::CachedValue<H::Out>>>>,
+	value_cache: RefCell<BTreeMap<H::Out, BTreeMap<Vec<u8>, trie_db::CachedValue<H::Out, ()>>>>,
 }
 
 impl<H: Hasher> CacheProvider<H> {
diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs
index 48310670c074..e42162a7b032 100644
--- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs
+++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs
@@ -27,8 +27,8 @@ use sp_std::{
	collections::{btree_map::BTreeMap, btree_set::BTreeSet},
	rc::Rc,
 };
-use sp_trie::{NodeCodec, ProofSizeProvider, StorageProof};
-use trie_db::{Hasher, RecordedForKey, TrieAccess};
+use sp_trie::{DBLocation, NodeCodec, ProofSizeProvider, StorageProof};
+use trie_db::{node_db::Hasher, RecordedForKey, TrieAccess};
 
 /// A trie recorder that only keeps track of the proof size.
/// @@ -40,8 +40,8 @@ pub(crate) struct SizeOnlyRecorder<'a, H: Hasher> { recorded_keys: RefMut<'a, BTreeMap, RecordedForKey>>, } -impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder<'a, H> { - fn record(&mut self, access: TrieAccess<'_, H::Out>) { +impl<'a, H: Hasher> trie_db::TrieRecorder for SizeOnlyRecorder<'a, H> { + fn record(&mut self, access: TrieAccess<'_, H::Out, DBLocation>) { let mut encoded_size_update = 0; match access { TrieAccess::NodeOwned { hash, node_owned } => @@ -114,10 +114,10 @@ impl SizeOnlyRecorderProvider { } } -impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderProvider { +impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderProvider { type Recorder<'a> = SizeOnlyRecorder<'a, H> where H: 'a; - fn drain_storage_proof(self) -> Option { + fn drain_storage_proof(&self) -> Option { None } @@ -130,7 +130,7 @@ impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderPr } } -impl ProofSizeProvider for SizeOnlyRecorderProvider { +impl ProofSizeProvider for SizeOnlyRecorderProvider { fn estimate_encoded_size(&self) -> usize { *self.encoded_size.borrow() } @@ -145,22 +145,23 @@ mod tests { use rand::Rng; use sp_trie::{ cache::{CacheSize, SharedTrieCache}, - MemoryDB, ProofSizeProvider, TrieRecorderProvider, + DBLocation, MemoryDB, ProofSizeProvider, TrieRecorderProvider, + }; + use trie_db::{ + test_utils::{Alphabet, StandardMap, ValueMode}, + Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieRecorder, }; - use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder}; - use trie_standardmap::{Alphabet, StandardMap, ValueMode}; use super::*; - type Recorder = sp_trie::recorder::Recorder; + type Recorder = sp_trie::recorder::Recorder; fn create_trie() -> ( sp_trie::MemoryDB, - TrieHash>, + TrieHash>, Vec<(Vec, Vec)>, ) { let mut db = MemoryDB::default(); - let mut root = Default::default(); let mut seed = Default::default(); let test_data: Vec<(Vec, Vec)> = StandardMap { @@ -180,15 +181,14 @@ mod tests { .collect(); // Fill database with values - { - let mut trie = TrieDBMutBuilder::>::new( - &mut db, &mut root, - ) - .build(); - for (k, v) in &test_data { - trie.insert(k, v).expect("Inserts data"); - } + let mut trie = + TrieDBMutBuilder::>::new(&mut db) + .build(); + for (k, v) in &test_data { + trie.insert(k, v).expect("Inserts data"); } + let change_set = trie.commit(); + let root = change_set.apply_to(&mut db); (db, root, test_data) } @@ -202,28 +202,31 @@ mod tests { let reference_recorder = Recorder::default(); let recorder_for_test: SizeOnlyRecorderProvider = SizeOnlyRecorderProvider::new(); - let reference_cache: SharedTrieCache = + let reference_cache: SharedTrieCache = SharedTrieCache::new(CacheSize::new(1024 * 5)); - let cache_for_test: SharedTrieCache = + let cache_for_test: SharedTrieCache = SharedTrieCache::new(CacheSize::new(1024 * 5)); { let local_cache = cache_for_test.local_cache(); let mut trie_cache_for_reference = local_cache.as_trie_db_cache(root); let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root); - let reference_trie = - TrieDBBuilder::>::new(&db, &root) - .with_recorder(&mut reference_trie_recorder) - .with_cache(&mut trie_cache_for_reference) - .build(); + let reference_trie = TrieDBBuilder::< + sp_trie::LayoutV1, + >::new(&db, &root) + .with_recorder(&mut reference_trie_recorder) + .with_cache(&mut trie_cache_for_reference) + .build(); let local_cache_for_test = reference_cache.local_cache(); let mut trie_cache_for_test = local_cache_for_test.as_trie_db_cache(root); let 
mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root); let test_trie = - TrieDBBuilder::>::new(&db, &root) - .with_recorder(&mut trie_recorder_under_test) - .with_cache(&mut trie_cache_for_test) - .build(); + TrieDBBuilder::>::new( + &db, &root, + ) + .with_recorder(&mut trie_recorder_under_test) + .with_cache(&mut trie_cache_for_test) + .build(); // Access random values from the test data for _ in 0..100 { @@ -259,16 +262,19 @@ mod tests { SizeOnlyRecorderProvider::new(); { let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root); - let reference_trie = - TrieDBBuilder::>::new(&db, &root) - .with_recorder(&mut reference_trie_recorder) - .build(); + let reference_trie = TrieDBBuilder::< + sp_trie::LayoutV1, + >::new(&db, &root) + .with_recorder(&mut reference_trie_recorder) + .build(); let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root); let test_trie = - TrieDBBuilder::>::new(&db, &root) - .with_recorder(&mut trie_recorder_under_test) - .build(); + TrieDBBuilder::>::new( + &db, &root, + ) + .with_recorder(&mut trie_recorder_under_test) + .build(); for _ in 0..200 { let index: usize = rng.gen_range(0..test_data.len()); diff --git a/cumulus/primitives/proof-size-hostfunction/src/lib.rs b/cumulus/primitives/proof-size-hostfunction/src/lib.rs index 8ebc58ea450d..91b01def995c 100644 --- a/cumulus/primitives/proof-size-hostfunction/src/lib.rs +++ b/cumulus/primitives/proof-size-hostfunction/src/lib.rs @@ -46,30 +46,29 @@ mod tests { use sp_core::Blake2Hasher; use sp_state_machine::TestExternalities; use sp_trie::{ - proof_size_extension::ProofSizeExt, recorder::Recorder, LayoutV1, PrefixedMemoryDB, - TrieDBMutBuilder, TrieMut, + proof_size_extension::ProofSizeExt, recorder::Recorder, DBLocation, LayoutV1, + PrefixedMemoryDB, TrieDBMutBuilder, }; use crate::{storage_proof_size, PROOF_RECORDING_DISABLED}; const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", &[1; 64]), (b"key2", &[2; 64])]; - type TestLayout = LayoutV1; + type TestLayout = LayoutV1; - fn get_prepared_test_externalities() -> (TestExternalities, Recorder) - { + fn get_prepared_test_externalities( + ) -> (TestExternalities, Recorder) { let mut db = PrefixedMemoryDB::default(); - let mut root = Default::default(); - { - let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); - for (k, v) in TEST_DATA { - trie.insert(k, v).expect("Inserts data"); - } + let mut trie = TrieDBMutBuilder::::new(&mut db).build(); + for (k, v) in TEST_DATA { + trie.insert(k, v).expect("Inserts data"); } + let change_set = trie.commit(); + let root = change_set.apply_to(&mut db); - let recorder: sp_trie::recorder::Recorder = Default::default(); - let trie_backend = sp_state_machine::TrieBackendBuilder::new(db, root) + let recorder: sp_trie::recorder::Recorder = Default::default(); + let trie_backend = sp_state_machine::TrieBackendBuilder::new(Box::new(db), root) .with_recorder(recorder.clone()) .build(); diff --git a/cumulus/test/relay-sproof-builder/src/lib.rs b/cumulus/test/relay-sproof-builder/src/lib.rs index fbd2692a36b4..5c8ab88e8091 100644 --- a/cumulus/test/relay-sproof-builder/src/lib.rs +++ b/cumulus/test/relay-sproof-builder/src/lib.rs @@ -20,7 +20,7 @@ use cumulus_primitives_core::{ use polkadot_primitives::UpgradeGoAhead; use sp_runtime::traits::HashingFor; use sp_std::collections::btree_map::BTreeMap; -use sp_trie::PrefixedMemoryDB; +use sp_trie::MemoryDB; /// Builds a sproof (portmanteau of 'spoof' and 'proof') of the relay chain state. 
#[derive(Clone)] @@ -130,10 +130,9 @@ impl RelayStateSproofBuilder { pub fn into_state_root_and_proof( self, ) -> (polkadot_primitives::Hash, sp_state_machine::StorageProof) { - let (db, root) = - PrefixedMemoryDB::>::default_with_root(); + let (db, root) = MemoryDB::>::default_with_root(); let state_version = Default::default(); // for test using default. - let mut backend = sp_state_machine::TrieBackendBuilder::new(db, root).build(); + let mut backend = sp_state_machine::TrieBackendBuilder::new(Box::new(db), root).build(); let mut relevant_keys = Vec::new(); { diff --git a/polkadot/erasure-coding/src/lib.rs b/polkadot/erasure-coding/src/lib.rs index e5155df4beba..5d2d5fe579b9 100644 --- a/polkadot/erasure-coding/src/lib.rs +++ b/polkadot/erasure-coding/src/lib.rs @@ -29,8 +29,8 @@ use polkadot_node_primitives::{AvailableData, Proof}; use polkadot_primitives::{BlakeTwo256, Hash as H256, HashT}; use sp_core::Blake2Hasher; use sp_trie::{ - trie_types::{TrieDBBuilder, TrieDBMutBuilderV0 as TrieDBMutBuilder}, - LayoutV0, MemoryDB, Trie, TrieMut, EMPTY_PREFIX, + trie_types::{TrieDBBuilderV1, TrieDBMutBuilderV0 as TrieDBMutBuilder}, + LayoutV0, MemoryDB, Trie, EMPTY_PREFIX, }; use thiserror::Error; @@ -215,9 +215,9 @@ impl<'a, I: AsRef<[u8]>> Iterator for Branches<'a, I> { fn next(&mut self) -> Option { use sp_trie::Recorder; - let mut recorder = Recorder::>::new(); + let mut recorder = Recorder::>::new(); let res = { - let trie = TrieDBBuilder::new(&self.trie_storage, &self.root) + let trie = TrieDBBuilderV1::<_, ()>::new(&self.trie_storage, &self.root) .with_recorder(&mut recorder) .build(); @@ -245,19 +245,16 @@ where I: AsRef<[u8]>, { let mut trie_storage: MemoryDB = MemoryDB::default(); - let mut root = H256::default(); - // construct trie mapping each chunk's index to its hash. 
- { - let mut trie = TrieDBMutBuilder::new(&mut trie_storage, &mut root).build(); - for (i, chunk) in chunks.as_ref().iter().enumerate() { - (i as u32).using_encoded(|encoded_index| { - let chunk_hash = BlakeTwo256::hash(chunk.as_ref()); - trie.insert(encoded_index, chunk_hash.as_ref()) - .expect("a fresh trie stored in memory cannot have errors loading nodes; qed"); - }) - } + let mut trie = TrieDBMutBuilder::new(&trie_storage).build(); + for (i, chunk) in chunks.as_ref().iter().enumerate() { + (i as u32).using_encoded(|encoded_index| { + let chunk_hash = BlakeTwo256::hash(chunk.as_ref()); + trie.insert(encoded_index, chunk_hash.as_ref()) + .expect("a fresh trie stored in memory cannot have errors loading nodes; qed"); + }) } + let root = trie.commit().apply_to(&mut trie_storage); Branches { trie_storage, root, chunks, current_pos: 0 } } @@ -267,10 +264,10 @@ where pub fn branch_hash(root: &H256, branch_nodes: &Proof, index: usize) -> Result { let mut trie_storage: MemoryDB = MemoryDB::default(); for node in branch_nodes.iter() { - (&mut trie_storage as &mut sp_trie::HashDB<_>).insert(EMPTY_PREFIX, node); + trie_storage.insert(EMPTY_PREFIX, node); } - let trie = TrieDBBuilder::new(&trie_storage, &root).build(); + let trie = TrieDBBuilderV1::<_, ()>::new(&trie_storage, &root).build(); let res = (index as u32).using_encoded(|key| { trie.get_with(key, |raw_hash: &[u8]| H256::decode(&mut &raw_hash[..])) }); diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 734dcbdeb441..bb4ede18d291 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -90,7 +90,7 @@ serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } kvdb = "0.13.0" kvdb-rocksdb = { version = "0.19.0", optional = true } -parity-db = { version = "0.4.12", optional = true } +parity-db = { version = "0.4.13", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } parking_lot = "0.12.1" bitvec = { version = "1.0.1", optional = true } diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index a668f8de76a0..6004f5a7a04d 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -40,7 +40,7 @@ sp-keystore = { path = "../../../substrate/primitives/keystore" } sc-client-api = { path = "../../../substrate/client/api" } kvdb = "0.13.0" -parity-db = { version = "0.4.12" } +parity-db = { version = "0.4.13" } [dev-dependencies] assert_matches = "1.4.0" diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index ca904bae28db..2c3f870fef5e 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -167,7 +167,9 @@ pub fn node_config( transaction_pool: Default::default(), network: network_config, keystore: KeystoreConfig::InMemory, - database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, + //database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, // TODO + // restore rocksdb? 
+ database: DatabaseSource::ParityDb { path: root.join("db"), multi_tree: true }, trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Default::default(), blocks_pruning: BlocksPruning::KeepFinalized, diff --git a/polkadot/tests/purge_chain_works.rs b/polkadot/tests/purge_chain_works.rs index f5a73e232e0c..256766d7295e 100644 --- a/polkadot/tests/purge_chain_works.rs +++ b/polkadot/tests/purge_chain_works.rs @@ -119,3 +119,49 @@ async fn purge_chain_paritydb_works() { }) .await; } + +#[tokio::test] +async fn purge_chain_paritydbmulti_works() { + run_with_timeout(Duration::from_secs(10 * 60), async move { + let tmpdir = tempdir().expect("could not create temp dir"); + + let mut cmd = Command::new(cargo_bin("polkadot")) + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) + .args(["--dev", "-d"]) + .arg(tmpdir.path()) + .arg("--database") + .arg("paritydbmulti") + .arg("--no-hardware-benchmarks") + .spawn() + .unwrap(); + + let (ws_url, _) = common::find_ws_url_from_output(cmd.stderr.take().unwrap()); + + // Let it produce 1 block. + common::wait_n_finalized_blocks(1, &ws_url).await; + + // Send SIGINT to node. + kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); + // Wait for the node to handle it and exit. + assert!(cmd.wait().unwrap().success()); + assert!(tmpdir.path().join("chains/rococo_dev").exists()); + assert!(tmpdir.path().join("chains/rococo_dev/paritydb/full").exists()); + + // Purge chain + let status = Command::new(cargo_bin("polkadot")) + .args(["purge-chain", "--dev", "-d"]) + .arg(tmpdir.path()) + .arg("--database") + .arg("paritydbmulti") + .arg("-y") + .status() + .unwrap(); + assert!(status.success()); + + // Make sure that the chain folder exists, but `db/full` is deleted. + assert!(tmpdir.path().join("chains/rococo_dev").exists()); + assert!(!tmpdir.path().join("chains/rococo_dev/paritydb/full").exists()); + }) + .await; +} diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index 8e0f7fb93b39..d45d27809c8f 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -22,6 +22,7 @@ node-primitives = { path = "../primitives" } node-testing = { path = "../testing" } kitchensink-runtime = { path = "../runtime" } sc-client-api = { path = "../../../client/api" } +sc-client-db = { path = "../../../client/db" } sp-runtime = { path = "../../../primitives/runtime" } sp-state-machine = { path = "../../../primitives/state-machine" } serde = { workspace = true, default-features = true } @@ -36,12 +37,12 @@ sc-basic-authorship = { path = "../../../client/basic-authorship" } sp-inherents = { path = "../../../primitives/inherents" } sp-timestamp = { path = "../../../primitives/timestamp", default-features = false } sp-tracing = { path = "../../../primitives/tracing" } -hash-db = "0.16.0" +trie-db = { package = "subtrie", version = "0.0.1" } tempfile = "3.1.0" fs_extra = "1" rand = { version = "0.8.5", features = ["small_rng"] } lazy_static = "1.4.0" -parity-db = "0.4.12" +parity-db = "0.4.13" sc-transaction-pool = { path = "../../../client/transaction-pool" } sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } futures = { version = "0.3.21", features = ["thread-pool"] } diff --git a/substrate/bin/node/bench/src/construct.rs b/substrate/bin/node/bench/src/construct.rs index 23d0a0cc1ee5..1c4389623b49 100644 --- a/substrate/bin/node/bench/src/construct.rs +++ b/substrate/bin/node/bench/src/construct.rs @@ -72,6 +72,7 @@ impl core::BenchmarkDescription 
for ConstructionBenchmarkDescription { match self.database_type { DatabaseType::RocksDb => path.push("rocksdb"), DatabaseType::ParityDb => path.push("paritydb"), + DatabaseType::ParityDbMulti => path.push("paritydbmulti"), } path.push(&format!("{}", self.size)); diff --git a/substrate/bin/node/bench/src/generator.rs b/substrate/bin/node/bench/src/generator.rs index 0fe0826028f5..a1f64425bafc 100644 --- a/substrate/bin/node/bench/src/generator.rs +++ b/substrate/bin/node/bench/src/generator.rs @@ -16,54 +16,36 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{collections::HashMap, sync::Arc}; - -use kvdb::KeyValueDB; -use node_primitives::Hash; -use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut}; - -use crate::simple_trie::SimpleTrie; +use node_primitives::{Block, Hash}; +use sp_runtime::traits::BlakeTwo256; +use sp_trie::trie_types::TrieDBMutBuilderV1; /// Generate trie from given `key_values`. /// /// Will fill your database `db` with trie data from `key_values` and /// return root. pub fn generate_trie( - db: Arc, + mut db: sc_client_db::StorageDb, key_values: impl IntoIterator, Vec)>, ) -> Hash { - let mut root = Hash::default(); + db.insert_empty_trie_node(); - let (db, overlay) = { - let mut overlay = HashMap::new(); - overlay.insert( - array_bytes::hex2bytes( - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", - ) - .expect("null key is valid"), - Some(vec![0]), - ); - let mut trie = SimpleTrie { db, overlay: &mut overlay }; - { - let mut trie_db = - TrieDBMutBuilderV1::::new(&mut trie, &mut root).build(); - for (key, value) in key_values { - trie_db.insert(&key, &value).expect("trie insertion failed"); - } + let mut trie_db = TrieDBMutBuilderV1::::new(&db).build(); + for (key, value) in key_values { + trie_db.insert(&key, &value).expect("trie insertion failed"); + } - trie_db.commit(); - } - (trie.db, overlay) - }; + let commit = trie_db.commit(); + let root = commit.root_hash(); - let mut transaction = db.transaction(); - for (key, value) in overlay.into_iter() { - match value { - Some(value) => transaction.put(0, &key[..], &value[..]), - None => transaction.delete(0, &key[..]), - } - } - db.write(transaction).expect("Failed to write transaction"); + let mut transaction = sc_client_db::Transaction::default(); + sc_client_db::apply_tree_commit::( + commit, + db.db.state_capabilities(), + &mut transaction, + ); + + db.db.commit(transaction).expect("Failed to write transaction"); root } diff --git a/substrate/bin/node/bench/src/import.rs b/substrate/bin/node/bench/src/import.rs index 78b280076e0b..78a443189499 100644 --- a/substrate/bin/node/bench/src/import.rs +++ b/substrate/bin/node/bench/src/import.rs @@ -73,6 +73,7 @@ impl core::BenchmarkDescription for ImportBenchmarkDescription { match self.database_type { DatabaseType::RocksDb => path.push("rocksdb"), DatabaseType::ParityDb => path.push("paritydb"), + DatabaseType::ParityDbMulti => path.push("paritydbmulti"), } path.push(&format!("{}", self.size)); diff --git a/substrate/bin/node/bench/src/main.rs b/substrate/bin/node/bench/src/main.rs index 1f69c9769580..8711b36192dc 100644 --- a/substrate/bin/node/bench/src/main.rs +++ b/substrate/bin/node/bench/src/main.rs @@ -119,7 +119,7 @@ fn main() { ] .iter().flat_map(|size| [ - DatabaseType::RocksDb, DatabaseType::ParityDb + DatabaseType::RocksDb, DatabaseType::ParityDb, DatabaseType::ParityDbMulti, ] .iter().map(move |db_type| (size, db_type))) => TrieReadBenchmarkDescription { 
		database_size: *size, database_type: *db_type },
@@ -130,7 +130,7 @@ fn main() {
	]
	.iter().flat_map(|size|
	[
-		DatabaseType::RocksDb, DatabaseType::ParityDb
+		DatabaseType::RocksDb, DatabaseType::ParityDb, DatabaseType::ParityDbMulti,
	]
	.iter().map(move |db_type| (size, db_type)))
	=> TrieWriteBenchmarkDescription {
		database_size: *size, database_type: *db_type },
@@ -138,7 +138,7 @@ fn main() {
		key_types: KeyTypes::Sr25519,
		block_type: BlockType::RandomTransfersKeepAlive,
		size: SizeType::Medium,
-		database_type: BenchDataBaseType::RocksDb,
+		database_type: BenchDataBaseType::RocksDb, // TODO some for paritydb??
	},
	ConstructionBenchmarkDescription {
		key_types: KeyTypes::Sr25519,
diff --git a/substrate/bin/node/bench/src/simple_trie.rs b/substrate/bin/node/bench/src/simple_trie.rs
index 6d5072358d23..df27ed9b337b 100644
--- a/substrate/bin/node/bench/src/simple_trie.rs
+++ b/substrate/bin/node/bench/src/simple_trie.rs
@@ -16,57 +16,39 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use std::{collections::HashMap, sync::Arc};
+use std::sync::Arc;
 
-use hash_db::{AsHashDB, HashDB, Hasher as _, Prefix};
 use kvdb::KeyValueDB;
 use node_primitives::Hash;
-use sp_trie::DBValue;
+use sp_core::H256;
+use sp_trie::{DBLocation, DBValue, MemoryDB};
+use trie_db::node_db::{NodeDB, Prefix};
 
 pub type Hasher = sp_core::Blake2Hasher;
 
 /// Immutable generated trie database with root.
 pub struct SimpleTrie<'a> {
	pub db: Arc<dyn KeyValueDB>,
-	pub overlay: &'a mut HashMap<Vec<u8>, Option<Vec<u8>>>,
+	pub overlay: &'a mut MemoryDB<Hasher>,
 }
 
-impl<'a> AsHashDB<Hasher, DBValue> for SimpleTrie<'a> {
-	fn as_hash_db(&self) -> &dyn hash_db::HashDB<Hasher, DBValue> {
-		self
-	}
-
-	fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB<Hasher, DBValue> + 'b) {
-		&mut *self
-	}
-}
-
-impl<'a> HashDB<Hasher, DBValue> for SimpleTrie<'a> {
-	fn get(&self, key: &Hash, prefix: Prefix) -> Option<DBValue> {
-		let key = sp_trie::prefixed_key::<Hasher>(key, prefix);
-		if let Some(value) = self.overlay.get(&key) {
-			return value.clone()
+impl<'a> NodeDB<Hasher, DBValue, DBLocation> for SimpleTrie<'a> {
+	fn get(
+		&self,
+		key: &H256,
+		prefix: Prefix,
+		_location: DBLocation,
+	) -> Option<(DBValue, Vec<DBLocation>)> {
+		if let Some(value) = self.overlay.get(&key, prefix) {
+			return Some((value.clone(), vec![]));
		}
-		self.db.get(0, &key).expect("Database backend error")
-	}
-
-	fn contains(&self, hash: &Hash, prefix: Prefix) -> bool {
-		self.get(hash, prefix).is_some()
-	}
-
-	fn insert(&mut self, prefix: Prefix, value: &[u8]) -> Hash {
-		let key = Hasher::hash(value);
-		self.emplace(key, prefix, value.to_vec());
-		key
-	}
-
-	fn emplace(&mut self, key: Hash, prefix: Prefix, value: DBValue) {
-		let key = sp_trie::prefixed_key::<Hasher>(&key, prefix);
-		self.overlay.insert(key, Some(value));
+		self.db
+			.get(0, key.as_ref())
+			.expect("Database backend error")
+			.map(|v| (v, vec![]))
	}
 
-	fn remove(&mut self, key: &Hash, prefix: Prefix) {
-		let key = sp_trie::prefixed_key::<Hasher>(key, prefix);
-		self.overlay.insert(key, None);
+	fn contains(&self, hash: &Hash, prefix: Prefix, location: DBLocation) -> bool {
+		self.get(hash, prefix, location).is_some()
	}
 }
diff --git a/substrate/bin/node/bench/src/tempdb.rs b/substrate/bin/node/bench/src/tempdb.rs
index f3fd693d21fe..5e695db509ee 100644
--- a/substrate/bin/node/bench/src/tempdb.rs
+++ b/substrate/bin/node/bench/src/tempdb.rs
@@ -16,59 +16,19 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
-use kvdb::{DBKeyValue, DBTransaction, KeyValueDB}; -use kvdb_rocksdb::{Database, DatabaseConfig}; -use std::{io, path::PathBuf, sync::Arc}; +use node_primitives::Block; +use sc_client_db::DatabaseSource; +use std::path::PathBuf; #[derive(Clone, Copy, Debug)] pub enum DatabaseType { RocksDb, ParityDb, + ParityDbMulti, } pub struct TempDatabase(tempfile::TempDir); -struct ParityDbWrapper(parity_db::Db); - -impl KeyValueDB for ParityDbWrapper { - /// Get a value by key. - fn get(&self, col: u32, key: &[u8]) -> io::Result>> { - Ok(self.0.get(col as u8, &key[key.len() - 32..]).expect("db error")) - } - - /// Get a value by partial key. Only works for flushed data. - fn get_by_prefix(&self, _col: u32, _prefix: &[u8]) -> io::Result>> { - unimplemented!() - } - - /// Write a transaction of changes to the buffer. - fn write(&self, transaction: DBTransaction) -> io::Result<()> { - self.0 - .commit(transaction.ops.iter().map(|op| match op { - kvdb::DBOp::Insert { col, key, value } => - (*col as u8, &key[key.len() - 32..], Some(value.to_vec())), - kvdb::DBOp::Delete { col, key } => (*col as u8, &key[key.len() - 32..], None), - kvdb::DBOp::DeletePrefix { col: _, prefix: _ } => unimplemented!(), - })) - .expect("db error"); - Ok(()) - } - - /// Iterate over flushed data for a given column. - fn iter<'a>(&'a self, _col: u32) -> Box> + 'a> { - unimplemented!() - } - - /// Iterate over flushed data for a given column, starting from a given prefix. - fn iter_with_prefix<'a>( - &'a self, - _col: u32, - _prefix: &'a [u8], - ) -> Box> + 'a> { - unimplemented!() - } -} - impl TempDatabase { pub fn new() -> Self { let dir = tempfile::tempdir().expect("temp dir creation failed"); @@ -81,21 +41,32 @@ impl TempDatabase { TempDatabase(dir) } - pub fn open(&mut self, db_type: DatabaseType) -> Arc { + pub fn open(&mut self, db_type: DatabaseType) -> sc_client_db::StorageDb { match db_type { DatabaseType::RocksDb => { - let db_cfg = DatabaseConfig::with_columns(1); - let db = Database::open(&db_cfg, &self.0.path()).expect("Database backend error"); - Arc::new(db) + let db = sc_client_db::open_database::( + &DatabaseSource::RocksDb { + path: self.0.path().into(), + cache_size: 128 * 1024 * 1024, + }, + true, + false, + ) + .expect("Database backend error"); + sc_client_db::StorageDb:: { db, state_db: None } + }, + DatabaseType::ParityDbMulti | DatabaseType::ParityDb => { + let db = sc_client_db::open_database::( + &DatabaseSource::ParityDb { + path: self.0.path().into(), + multi_tree: matches!(db_type, DatabaseType::ParityDbMulti), + }, + true, + false, + ) + .expect("Database backend error"); + sc_client_db::StorageDb:: { db, state_db: None } }, - DatabaseType::ParityDb => Arc::new(ParityDbWrapper({ - let mut options = parity_db::Options::with_columns(self.0.path(), 1); - let column_options = &mut options.columns[0]; - column_options.ref_counted = true; - column_options.preimage = true; - column_options.uniform = true; - parity_db::Db::open_or_create(&options).expect("db open error") - })), } } } diff --git a/substrate/bin/node/bench/src/trie.rs b/substrate/bin/node/bench/src/trie.rs index 09ab405c03b2..3be41dab4e37 100644 --- a/substrate/bin/node/bench/src/trie.rs +++ b/substrate/bin/node/bench/src/trie.rs @@ -18,20 +18,18 @@ //! Trie benchmark (integrated). 
-use hash_db::Prefix; -use kvdb::KeyValueDB; use lazy_static::lazy_static; use rand::Rng; +use sp_runtime::traits::BlakeTwo256; use sp_state_machine::Backend as _; -use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut as _}; -use std::{borrow::Cow, collections::HashMap, sync::Arc}; +use sp_trie::trie_types::TrieDBMutBuilderV1; +use std::borrow::Cow; use node_primitives::Hash; use crate::{ core::{self, Mode, Path}, generator::generate_trie, - simple_trie::SimpleTrie, tempdb::{DatabaseType, TempDatabase}, }; @@ -164,23 +162,13 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { } } -struct Storage(Arc); - -impl sp_state_machine::Storage for Storage { - fn get(&self, key: &Hash, prefix: Prefix) -> Result>, String> { - let key = sp_trie::prefixed_key::(key, prefix); - self.0.get(0, &key).map_err(|e| format!("Database backend error: {:?}", e)) - } -} - impl core::Benchmark for TrieReadBenchmark { fn run(&mut self, mode: Mode) -> std::time::Duration { let mut db = self.database.clone(); - let storage: Arc> = - Arc::new(Storage(db.open(self.database_type))); - - let trie_backend = sp_state_machine::TrieBackendBuilder::new(storage, self.root).build(); + let db = db.open(self.database_type); + let trie_backend = + sp_state_machine::TrieBackendBuilder::<_>::new(Box::new(db), self.root).build(); for (warmup_key, warmup_value) in self.warmup_keys.iter() { let value = trie_backend .storage(&warmup_key[..]) @@ -280,13 +268,9 @@ impl core::Benchmark for TrieWriteBenchmark { fn run(&mut self, mode: Mode) -> std::time::Duration { let mut rng = rand::thread_rng(); let mut db = self.database.clone(); - let kvdb = db.open(self.database_type); - - let mut new_root = self.root; + let db = db.open(self.database_type); - let mut overlay = HashMap::new(); - let mut trie = SimpleTrie { db: kvdb.clone(), overlay: &mut overlay }; - let mut trie_db_mut = TrieDBMutBuilderV1::from_existing(&mut trie, &mut new_root).build(); + let mut trie_db_mut = TrieDBMutBuilderV1::from_existing(&db, self.root).build(); for (warmup_key, warmup_value) in self.warmup_keys.iter() { let value = trie_db_mut @@ -308,17 +292,16 @@ impl core::Benchmark for TrieWriteBenchmark { let started = std::time::Instant::now(); trie_db_mut.insert(&test_key, &test_val).expect("Should be inserted ok"); - trie_db_mut.commit(); - drop(trie_db_mut); - - let mut transaction = kvdb.transaction(); - for (key, value) in overlay.into_iter() { - match value { - Some(value) => transaction.put(0, &key[..], &value[..]), - None => transaction.delete(0, &key[..]), - } - } - kvdb.write(transaction).expect("Failed to write transaction"); + let commit = trie_db_mut.commit(); + let new_root = commit.root_hash(); + + let mut transaction = sc_client_db::Transaction::default(); + sc_client_db::apply_tree_commit::( + commit, + db.db.state_capabilities(), + &mut transaction, + ); + db.db.commit(transaction).expect("Failed to write transaction"); let elapsed = started.elapsed(); diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index 23a62cc0bd24..ae24e9457bde 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -66,7 +66,9 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { transaction_pool: Default::default(), network: network_config, keystore: KeystoreConfig::InMemory, - database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, + //database: DatabaseSource::RocksDb { path: 
root.join("db"), cache_size: 128 }, TODO + // restore rocksdb? + database: DatabaseSource::ParityDb { path: root.join("db"), multi_tree: true }, trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::KeepAll, diff --git a/substrate/bin/node/cli/benches/executor.rs b/substrate/bin/node/cli/benches/executor.rs index a326e1a79ea3..c702c564c584 100644 --- a/substrate/bin/node/cli/benches/executor.rs +++ b/substrate/bin/node/cli/benches/executor.rs @@ -30,7 +30,7 @@ use sp_core::{ traits::{CallContext, CodeExecutor, RuntimeCode}, }; use sp_runtime::traits::BlakeTwo256; -use sp_state_machine::TestExternalities as CoreTestExternalities; +use sp_state_machine::{DBLocation, TestExternalities as CoreTestExternalities}; use staging_node_cli::service::RuntimeExecutor; criterion_group!(benches, bench_execute_block); @@ -82,10 +82,11 @@ fn construct_block( let extrinsics = extrinsics.into_iter().map(sign).collect::>(); // calculate the header fields that we can. - let extrinsics_root = - LayoutV0::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) - .to_fixed_bytes() - .into(); + let extrinsics_root = LayoutV0::::ordered_trie_root( + extrinsics.iter().map(Encode::encode), + ) + .to_fixed_bytes() + .into(); let header = Header { parent_hash, diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs index de4eef1944d4..7a33ae8bd80b 100644 --- a/substrate/bin/node/cli/benches/transaction_pool.rs +++ b/substrate/bin/node/cli/benches/transaction_pool.rs @@ -64,7 +64,9 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { }, network: network_config, keystore: KeystoreConfig::InMemory, - database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, + //database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, TODO + // restore rocksdb ?? + database: DatabaseSource::ParityDb { path: root.join("db"), multi_tree: true }, trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::KeepAll, diff --git a/substrate/bin/node/cli/tests/common.rs b/substrate/bin/node/cli/tests/common.rs index 2d74cdd5a041..8dfc8745ecf7 100644 --- a/substrate/bin/node/cli/tests/common.rs +++ b/substrate/bin/node/cli/tests/common.rs @@ -145,7 +145,7 @@ pub fn construct_block( // calculate the header fields that we can. let extrinsics_root = - Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) + Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) .to_fixed_bytes() .into(); diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index df302a6453b9..c47298d94751 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -214,13 +214,17 @@ pub enum DatabaseType { RocksDb, /// Parity DB backend. ParityDb, + /// Parity DB backend, multi tree active. 
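+ /// With multi-tree active, state is stored in a parity-db tree column and nodes
+ /// are addressed by location (`StateCapabilities::TreeColumn` in `sc-client-db`)
+ /// instead of the classic hash-keyed, journalled layout.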
+ ParityDbMulti, } impl DatabaseType { fn into_settings(self, path: PathBuf) -> sc_client_db::DatabaseSource { match self { Self::RocksDb => sc_client_db::DatabaseSource::RocksDb { path, cache_size: 512 }, - Self::ParityDb => sc_client_db::DatabaseSource::ParityDb { path }, + Self::ParityDb => sc_client_db::DatabaseSource::ParityDb { path, multi_tree: false }, + Self::ParityDbMulti => + sc_client_db::DatabaseSource::ParityDb { path, multi_tree: true }, } } } diff --git a/substrate/client/api/src/backend.rs b/substrate/client/api/src/backend.rs index 31b100433c70..d412acf9e84a 100644 --- a/substrate/client/api/src/backend.rs +++ b/substrate/client/api/src/backend.rs @@ -29,15 +29,16 @@ use sp_runtime::{ Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ - backend::AsTrieBackend, ChildStorageCollection, IndexOperation, IterArgs, - OffchainChangesCollection, StorageCollection, StorageIterator, + backend::{AsTrieBackend, BackendTransaction}, + ChildStorageCollection, IndexOperation, IterArgs, OffchainChangesCollection, StorageCollection, + StorageIterator, }; use sp_storage::{ChildInfo, StorageData, StorageKey}; pub use sp_trie::MerkleValue; use crate::{blockchain::Backend as BlockchainBackend, UsageInfo}; -pub use sp_state_machine::{Backend as StateBackend, BackendTransaction, KeyValueStates}; +pub use sp_state_machine::{Backend as StateBackend, KeyValueStates}; /// Extracts the state backend type for the given backend. pub type StateBackendFor = >::State; @@ -176,7 +177,7 @@ pub trait BlockImportOperation { /// Inject storage data into the database. fn update_db_storage( &mut self, - update: BackendTransaction>, + update: BackendTransaction, ) -> sp_blockchain::Result<()>; /// Set genesis state. If `commit` is `false` the state is saved in memory, but is not written @@ -516,12 +517,7 @@ pub trait Backend: AuxStore + Send + Sync { /// Associated blockchain backend type. type Blockchain: BlockchainBackend; /// Associated state backend type. - type State: StateBackend> - + Send - + AsTrieBackend< - HashingFor, - TrieBackendStorage = >>::TrieBackendStorage, - >; + type State: StateBackend> + Send + AsTrieBackend>; /// Offchain workers local storage. 
type OffchainStorage: OffchainStorage; diff --git a/substrate/client/api/src/in_mem.rs b/substrate/client/api/src/in_mem.rs index b933ed1f17e0..ceb86171855a 100644 --- a/substrate/client/api/src/in_mem.rs +++ b/substrate/client/api/src/in_mem.rs @@ -29,7 +29,7 @@ use sp_runtime::{ Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ - Backend as StateBackend, BackendTransaction, ChildStorageCollection, InMemoryBackend, + backend::BackendTransaction, Backend as StateBackend, ChildStorageCollection, InMemoryBackend, IndexOperation, StorageCollection, }; use std::{ @@ -480,7 +480,7 @@ impl backend::AuxStore for Blockchain { pub struct BlockImportOperation { pending_block: Option>, old_state: InMemoryBackend>, - new_state: Option>>, + trie_commit: Option>, aux: Vec<(Vec, Option>)>, finalized_blocks: Vec<(Block::Hash, Option)>, set_head: Option, @@ -502,14 +502,15 @@ impl BlockImportOperation { ) }); - let (root, transaction) = self.old_state.full_storage_root( + let transaction = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, state_version, ); + let root = transaction.root_hash(); if commit { - self.new_state = Some(transaction); + self.trie_commit = Some(transaction); } Ok(root) } @@ -538,9 +539,9 @@ impl backend::BlockImportOperation for BlockImportOperatio fn update_db_storage( &mut self, - update: BackendTransaction>, + update: BackendTransaction, ) -> sp_blockchain::Result<()> { - self.new_state = Some(update); + self.trie_commit = Some(update); Ok(()) } @@ -668,7 +669,7 @@ impl backend::Backend for Backend { Ok(BlockImportOperation { pending_block: None, old_state, - new_state: None, + trie_commit: None, aux: Default::default(), finalized_blocks: Default::default(), set_head: None, @@ -692,15 +693,15 @@ impl backend::Backend for Backend { } if let Some(pending_block) = operation.pending_block { - let old_state = &operation.old_state; let (header, body, justification) = pending_block.block.into_inner(); let hash = header.hash(); - let new_state = match operation.new_state { - Some(state) => old_state.update_backend(*header.state_root(), state), - None => old_state.clone(), - }; + let mut new_state = + operation.old_state.clone_in_mem().expect("Backend is MemoryDB; qed"); + if let Some(commit) = operation.trie_commit { + new_state.apply_transaction(commit); + } self.states.write().insert(hash, new_state); @@ -754,7 +755,7 @@ impl backend::Backend for Backend { self.states .read() .get(&hash) - .cloned() + .and_then(|s| s.clone_in_mem()) .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash))) } diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 932287ac8657..f827d0004d41 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -774,8 +774,8 @@ mod tests { let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap(); assert_eq!( - proposal.storage_changes.transaction_storage_root, - storage_changes.transaction_storage_root, + proposal.storage_changes.transaction.root_hash(), + storage_changes.transaction.root_hash(), ); } diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs index d436673cb9de..4cd7ca0ea88d 100644 --- a/substrate/client/cli/src/arg_enums.rs +++ b/substrate/client/cli/src/arg_enums.rs @@ -231,6 +231,8 @@ pub enum Database { RocksDb, /// ParityDb. 
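/// (Maps to `DatabaseSource::ParityDb { multi_tree: false }`, i.e. the existing
/// hash-keyed state layout.)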
ParityDb, + /// ParityDb using experimental multi tree storage. + ParityDbMulti, /// Detect whether there is an existing database. Use it, if there is, if not, create new /// instance of ParityDb Auto, @@ -246,6 +248,7 @@ impl Database { #[cfg(feature = "rocksdb")] "rocksdb", "paritydb", + "paritydbmulti", "paritydb-experimental", "auto", ] diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs index 5def9ce9b726..8a806be25223 100644 --- a/substrate/client/cli/src/config.rs +++ b/substrate/client/cli/src/config.rs @@ -225,13 +225,16 @@ pub trait CliConfiguration: Sized { Ok(match database { #[cfg(feature = "rocksdb")] Database::RocksDb => DatabaseSource::RocksDb { path: rocksdb_path, cache_size }, - Database::ParityDb => DatabaseSource::ParityDb { path: paritydb_path }, + Database::ParityDb => + DatabaseSource::ParityDb { path: paritydb_path, multi_tree: false }, + Database::ParityDbMulti => + DatabaseSource::ParityDb { path: paritydb_path, multi_tree: true }, Database::ParityDbDeprecated => { eprintln!( "WARNING: \"paritydb-experimental\" database setting is deprecated and will be removed in future releases. \ Please update your setup to use the new value: \"paritydb\"." ); - DatabaseSource::ParityDb { path: paritydb_path } + DatabaseSource::ParityDb { path: paritydb_path, multi_tree: false } }, Database::Auto => DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size }, }) diff --git a/substrate/client/cli/src/runner.rs b/substrate/client/cli/src/runner.rs index 4201a0f4062f..ee59badb1b3c 100644 --- a/substrate/client/cli/src/runner.rs +++ b/substrate/client/cli/src/runner.rs @@ -247,7 +247,10 @@ mod tests { transaction_pool: Default::default(), network: NetworkConfiguration::new_memory(), keystore: sc_service::config::KeystoreConfig::InMemory, - database: sc_client_db::DatabaseSource::ParityDb { path: root.clone() }, + database: sc_client_db::DatabaseSource::ParityDb { + path: root.clone(), + multi_tree: false, + }, // TODO multitree here? 
likely no trie_cache_maximum_size: None, state_pruning: None, blocks_pruning: sc_client_db::BlocksPruning::KeepAll, @@ -349,6 +352,7 @@ mod tests { let output = std::process::Command::new(std::env::current_exe().unwrap()) .arg(test_name) .env("RUN_FORKED_TEST", "1") + .env("RUST_LOG", "info") .output() .unwrap(); diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs index 1be7be8eeeaa..e40b71086b6c 100644 --- a/substrate/client/consensus/aura/src/lib.rs +++ b/substrate/client/consensus/aura/src/lib.rs @@ -668,12 +668,8 @@ mod tests { fn make_block_import( &self, client: PeersClient, - ) -> ( - BlockImportAdapter, - Option>, - Self::PeerData, - ) { - (client.as_block_import(), None, ()) + ) -> (Self::BlockImport, Option>, Self::PeerData) { + (client, None, ()) } fn peer(&mut self, i: usize) -> &mut AuraPeer { diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index ccf72939631a..ffec40ee944d 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -1885,9 +1885,10 @@ where } let revert_up_to_number = best_number - revertible; - let revert_up_to_hash = client.hash(revert_up_to_number)?.ok_or(ClientError::Backend( - format!("Unexpected hash lookup failure for block number: {}", revert_up_to_number), - ))?; + let revert_up_to_hash = + HeaderBackend::hash(&*client, revert_up_to_number)?.ok_or(ClientError::Backend( + format!("Unexpected hash lookup failure for block number: {}", revert_up_to_number), + ))?; // Revert epoch changes tree. diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index 38c9e1ff6ac2..c96c8ac770e2 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -106,15 +106,15 @@ impl DummyProposer { .build() .unwrap(); - let mut block = match block_builder.build().map_err(|e| e.into()) { - Ok(b) => b.block, + let (mut block, storage_changes) = match block_builder.build().map_err(|e| e.into()) { + Ok(b) => (b.block, b.storage_changes), Err(e) => return future::ready(Err(e)), }; // mutate the block header according to the mutator. 
(self.factory.mutator)(&mut block.header, Stage::PreSeal); - future::ready(Ok(Proposal { block, proof: (), storage_changes: Default::default() })) + future::ready(Ok(Proposal { block, proof: (), storage_changes })) } } @@ -220,11 +220,7 @@ impl TestNetFactory for BabeTestNet { fn make_block_import( &self, client: PeersClient, - ) -> ( - BlockImportAdapter, - Option>, - Option, - ) { + ) -> (Self::BlockImport, Option>, Option) { let client = client.as_client(); let config = crate::configuration(&*client).expect("config available"); @@ -235,11 +231,7 @@ impl TestNetFactory for BabeTestNet { let data_block_import = Mutex::new(Some(Box::new(block_import.clone()) as BoxBlockImport<_>)); - ( - BlockImportAdapter::new(block_import), - None, - Some(PeerData { link, block_import: data_block_import }), - ) + (block_import, None, Some(PeerData { link, block_import: data_block_import })) } fn make_verifier(&self, client: PeersClient, maybe_link: &Option) -> Self::Verifier { diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index d106c9dcd881..dffea6fca90a 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -45,8 +45,7 @@ use sc_consensus::{ }; use sc_network::{config::RequestResponseConfig, ProtocolName}; use sc_network_test::{ - Block, BlockImportAdapter, FullPeerConfig, PassThroughVerifier, Peer, PeersClient, - PeersFullClient, TestNetFactory, + Block, FullPeerConfig, PassThroughVerifier, Peer, PeersClient, PeersFullClient, TestNetFactory, }; use sc_utils::notification::NotificationReceiver; use serde::{Deserialize, Serialize}; @@ -83,12 +82,8 @@ const GOOD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0xbf); const BAD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0x42); const ALTERNATE_BAD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0x13); -type BeefyBlockImport = crate::BeefyBlockImport< - Block, - substrate_test_runtime_client::Backend, - TestApi, - BlockImportAdapter, ->; +type BeefyBlockImport = + crate::BeefyBlockImport; pub(crate) type BeefyValidatorSet = ValidatorSet; pub(crate) type BeefyPeer = Peer; @@ -207,15 +202,11 @@ impl TestNetFactory for BeefyTestNet { fn make_block_import( &self, client: PeersClient, - ) -> ( - BlockImportAdapter, - Option>, - Self::PeerData, - ) { + ) -> (Self::BlockImport, Option>, Self::PeerData) { let keys = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let api = Arc::new(TestApi::new(self.beefy_genesis, &validator_set, GOOD_MMR_ROOT)); - let inner = BlockImportAdapter::new(client.clone()); + let inner = client.clone(); let (block_import, voter_links, rpc_links) = beefy_block_import_and_links(inner, client.as_backend(), api, None); let peer_data = PeerData { @@ -223,7 +214,7 @@ impl TestNetFactory for BeefyTestNet { beefy_voter_links: Mutex::new(Some(voter_links)), ..Default::default() }; - (BlockImportAdapter::new(block_import), None, peer_data) + (block_import, None, peer_data) } fn peer(&mut self, i: usize) -> &mut BeefyPeer { diff --git a/substrate/client/consensus/grandpa/src/import.rs b/substrate/client/consensus/grandpa/src/import.rs index bc2983569c53..597fbf929ef6 100644 --- a/substrate/client/consensus/grandpa/src/import.rs +++ b/substrate/client/consensus/grandpa/src/import.rs @@ -21,7 +21,7 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use log::debug; use parity_scale_codec::Decode; -use sc_client_api::{backend::Backend, 
utils::is_descendent_of}; +use sc_client_api::{backend::Backend, utils::is_descendent_of, HeaderBackend}; use sc_consensus::{ shared_data::{SharedDataLocked, SharedDataLockedUpgradable}, BlockCheckParams, BlockImport, BlockImportParams, ImportResult, JustificationImport, @@ -363,7 +363,7 @@ where // best finalized block. let best_finalized_number = self.inner.info().finalized_number; let canon_number = best_finalized_number.min(median_last_finalized_number); - let canon_hash = self.inner.hash(canon_number) + let canon_hash = HeaderBackend::hash(&*self.inner, canon_number) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? .expect( "the given block number is less or equal than the current best finalized number; \ diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs index 7e42c2d45c73..f2ada069ae59 100644 --- a/substrate/client/consensus/grandpa/src/tests.rs +++ b/substrate/client/consensus/grandpa/src/tests.rs @@ -30,8 +30,8 @@ use sc_consensus::{ }; use sc_network::config::Role; use sc_network_test::{ - Block, BlockImportAdapter, FullPeerConfig, Hash, PassThroughVerifier, Peer, PeersClient, - PeersFullClient, TestClient, TestNetFactory, + Block, FullPeerConfig, Hash, PassThroughVerifier, Peer, PeersClient, PeersFullClient, + TestClient, TestNetFactory, }; use sc_transaction_pool_api::RejectAllTxPool; use sp_api::{ApiRef, ProvideRuntimeApi}; @@ -124,7 +124,7 @@ impl TestNetFactory for GrandpaTestNet { fn make_block_import( &self, client: PeersClient, - ) -> (BlockImportAdapter, Option>, PeerData) { + ) -> (Self::BlockImport, Option>, PeerData) { let (client, backend) = (client.as_client(), client.as_backend()); let (import, link) = block_import( client.clone(), @@ -135,7 +135,7 @@ impl TestNetFactory for GrandpaTestNet { ) .expect("Could not create block import for fresh peer."); let justification_import = Box::new(import.clone()); - (BlockImportAdapter::new(import), Some(justification_import), Mutex::new(Some(link))) + (import, Some(justification_import), Mutex::new(Some(link))) } fn peer(&mut self, i: usize) -> &mut GrandpaPeer { diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml index 57ee1a8ad331..81fba4bd3892 100644 --- a/substrate/client/db/Cargo.toml +++ b/substrate/client/db/Cargo.toml @@ -19,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } -hash-db = "0.16.0" +trie-db = { package = "subtrie", version = "0.0.1" } kvdb = "0.13.0" kvdb-memorydb = "0.13.0" kvdb-rocksdb = { version = "0.19.0", optional = true } linked-hash-map = "0.5.4" log = { workspace = true, default-features = true } -parity-db = "0.4.12" +parity-db = "0.4.13" parking_lot = "0.12.1" sc-client-api = { path = "../api" } sc-state-db = { path = "../state-db" } @@ -40,13 +40,13 @@ sp-trie = { path = "../../primitives/trie" } [dev-dependencies] criterion = "0.4.0" +fs_extra = "1" kvdb-rocksdb = "0.19.0" rand = "0.8.5" tempfile = "3.1.0" quickcheck = { version = "1.0.3", default-features = false } kitchensink-runtime = { path = "../../bin/node/runtime" } sp-tracing = { path = "../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } array-bytes = "6.1" [features] diff --git a/substrate/client/db/benches/state_access.rs b/substrate/client/db/benches/state_access.rs index e47559e710df..a91adbba5ed9 100644 --- a/substrate/client/db/benches/state_access.rs +++ 
b/substrate/client/db/benches/state_access.rs @@ -83,11 +83,11 @@ fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 .map(|(k, v)| (k.clone(), Some(v.clone()))) .collect::>(); - let (state_root, tx) = db.state_at(parent_hash).unwrap().storage_root( - changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())), + let tx = db.state_at(parent_hash).unwrap().storage_root( + changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref(), None)), StateVersion::V1, ); - header.state_root = state_root; + header.state_root = tx.root_hash(); op.update_db_storage(tx).unwrap(); op.update_storage(changes.clone(), Default::default()).unwrap(); @@ -120,7 +120,8 @@ fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend let settings = DatabaseSettings { trie_cache_maximum_size, state_pruning: Some(PruningMode::ArchiveAll), - source: DatabaseSource::ParityDb { path }, + source: DatabaseSource::ParityDb { path, multi_tree: true }, + //source: DatabaseSource::ParityDb { path, multi_tree: false }, TODO both false and true? blocks_pruning: BlocksPruning::KeepAll, }; diff --git a/substrate/client/db/src/bench.rs b/substrate/client/db/src/bench.rs index 32503cf63c0a..e6bc68b02f09 100644 --- a/substrate/client/db/src/bench.rs +++ b/substrate/client/db/src/bench.rs @@ -19,8 +19,6 @@ //! State backend that's useful for benchmarking use crate::{DbState, DbStateBuilder}; -use hash_db::{Hasher as DbHasher, Prefix}; -use kvdb::{DBTransaction, KeyValueDB}; use linked_hash_map::LinkedHashMap; use parking_lot::Mutex; use sp_core::{ @@ -29,35 +27,21 @@ use sp_core::{ }; use sp_runtime::{traits::Hash, StateVersion, Storage}; use sp_state_machine::{ - backend::Backend as StateBackend, BackendTransaction, ChildStorageCollection, DBValue, - IterArgs, StorageCollection, StorageIterator, StorageKey, StorageValue, + backend::{Backend as StateBackend, DBLocation}, + BackendTransaction, ChildStorageCollection, IterArgs, StorageCollection, StorageIterator, + StorageKey, StorageValue, }; use sp_trie::{ cache::{CacheSize, SharedTrieCache}, - prefixed_key, MemoryDB, MerkleValue, + ChildChangeset, MemoryDB, MerkleValue, }; use std::{ cell::{Cell, RefCell}, - collections::HashMap, sync::Arc, }; type State = DbState; -struct StorageDb { - db: Arc, - _phantom: std::marker::PhantomData, -} - -impl sp_state_machine::Storage for StorageDb { - fn get(&self, key: &Hasher::Output, prefix: Prefix) -> Result, String> { - let prefixed_key = prefixed_key::(key, prefix); - self.db - .get(0, &prefixed_key) - .map_err(|e| format!("Database backend error: {:?}", e)) - } -} - struct KeyTracker { enable_tracking: bool, /// Key tracker for keys in the main trie. @@ -73,17 +57,14 @@ struct KeyTracker { /// State that manages the backend database reference. Allows runtime to control the database. pub struct BenchmarkingState { - root: Cell, genesis_root: Hasher::Output, + genesis: MemoryDB, state: RefCell>>, - db: Cell>>, - genesis: HashMap, (Vec, i32)>, - record: Cell>>, key_tracker: Arc>, whitelist: RefCell>, - proof_recorder: Option>, + proof_recorder: Option>, proof_recorder_root: Cell, - shared_trie_cache: SharedTrieCache, + shared_trie_cache: SharedTrieCache, } /// A raw iterator over the `BenchmarkingState`. 
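// `BenchmarkingState` below no longer keeps a kvdb handle, a mutable root cell and a
// write record; genesis is materialised once into a `MemoryDB`, and every root
// computation returns a `BackendTransaction` (a `subtrie` changeset) that carries its
// own root. The two core idioms, sketched from the hunks that follow (the `Hasher`
// parameter mirrors the state's generic and is otherwise an assumption):
//
//     // The empty root now comes from committing an empty trie builder ...
//     let mdb = MemoryDB::<Hasher>::default();
//     let root = sp_trie::trie_types::TrieDBMutBuilderV1::<Hasher>::new(&mdb)
//         .build()
//         .commit()
//         .root_hash();
//
//     // ... and a changeset is folded back into an in-memory db with `apply_to`,
//     // which returns the new root.
//     let mut genesis = MemoryDB::<Hasher>::default();
//     let genesis_root = transaction.apply_to(&mut genesis);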
@@ -134,17 +115,16 @@ impl BenchmarkingState { enable_tracking: bool, ) -> Result { let state_version = sp_runtime::StateVersion::default(); - let mut root = Default::default(); - let mut mdb = MemoryDB::::default(); - sp_trie::trie_types::TrieDBMutBuilderV1::::new(&mut mdb, &mut root).build(); + let mdb = MemoryDB::::default(); + let root = sp_trie::trie_types::TrieDBMutBuilderV1::::new(&mdb) + .build() + .commit() + .root_hash(); let mut state = BenchmarkingState { state: RefCell::new(None), - db: Cell::new(None), - root: Cell::new(root), genesis: Default::default(), genesis_root: Default::default(), - record: Default::default(), key_tracker: Arc::new(Mutex::new(KeyTracker { main_keys: Default::default(), child_keys: Default::default(), @@ -159,40 +139,33 @@ impl BenchmarkingState { state.add_whitelist_to_tracker(); - state.reopen()?; + *state.state.borrow_mut() = + Some(DbStateBuilder::::new(Box::new(mdb), root).build()); + let child_delta = genesis.children_default.values().map(|child_content| { ( &child_content.child_info, child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), ) }); - let (root, transaction): (Hasher::Output, _) = - state.state.borrow().as_ref().unwrap().full_storage_root( - genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), - child_delta, - state_version, - ); - state.genesis = transaction.clone().drain(); - state.genesis_root = root; - state.commit(root, transaction, Vec::new(), Vec::new())?; - state.record.take(); + let transaction = state.state.borrow().as_ref().unwrap().full_storage_root( + genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), + child_delta, + state_version, + ); + let mut genesis = MemoryDB::::default(); + let genesis_root = transaction.apply_to(&mut genesis); + state.genesis = genesis; + state.genesis_root = genesis_root; + state.reopen()?; Ok(state) } fn reopen(&self) -> Result<(), String> { *self.state.borrow_mut() = None; - let db = match self.db.take() { - Some(db) => db, - None => Arc::new(kvdb_memorydb::create(1)), - }; - self.db.set(Some(db.clone())); - if let Some(recorder) = &self.proof_recorder { - recorder.reset(); - self.proof_recorder_root.set(self.root.get()); - } - let storage_db = Arc::new(StorageDb:: { db, _phantom: Default::default() }); + let db = Box::new(self.genesis.clone()); *self.state.borrow_mut() = Some( - DbStateBuilder::::new(storage_db, self.root.get()) + DbStateBuilder::::new(db, self.genesis_root) .with_optional_recorder(self.proof_recorder.clone()) .with_cache(self.shared_trie_cache.local_cache()) .build(), @@ -340,7 +313,6 @@ fn state_err() -> String { impl StateBackend for BenchmarkingState { type Error = as StateBackend>::Error; - type TrieBackendStorage = as StateBackend>::TrieBackendStorage; type RawIter = RawIter; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -438,13 +410,15 @@ impl StateBackend for BenchmarkingState { fn storage_root<'a>( &self, - delta: impl Iterator)>, + delta: impl Iterator, Option>)>, state_version: StateVersion, - ) -> (Hasher::Output, BackendTransaction) { + ) -> BackendTransaction { self.state .borrow() .as_ref() - .map_or(Default::default(), |s| s.storage_root(delta, state_version)) + .map_or(BackendTransaction::unchanged(self.genesis_root), |s| { + s.storage_root(delta, state_version) + }) } fn child_storage_root<'a>( @@ -452,11 +426,11 @@ impl StateBackend for BenchmarkingState { child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (Hasher::Output, bool, BackendTransaction) { - self.state - 
.borrow() - .as_ref() - .map_or(Default::default(), |s| s.child_storage_root(child_info, delta, state_version)) + ) -> (BackendTransaction, bool) { + self.state.borrow().as_ref().map_or_else( + || (BackendTransaction::unchanged(self.genesis_root), true), + |s| s.child_storage_root(child_info, delta, state_version), + ) } fn raw_iter(&self, args: IterArgs) -> Result { @@ -476,30 +450,20 @@ impl StateBackend for BenchmarkingState { fn commit( &self, - storage_root: ::Out, - mut transaction: BackendTransaction, + transaction: BackendTransaction, main_storage_changes: StorageCollection, child_storage_changes: ChildStorageCollection, ) -> Result<(), Self::Error> { - if let Some(db) = self.db.take() { - let mut db_transaction = DBTransaction::new(); - let changes = transaction.drain(); - let mut keys = Vec::with_capacity(changes.len()); - for (key, (val, rc)) in changes { - if rc > 0 { - db_transaction.put(0, &key, &val); - } else if rc < 0 { - db_transaction.delete(0, &key); - } - keys.push(key); + if let Some(state) = &mut *self.state.borrow_mut() { + if let Some(mut db) = state.backend_storage_mut().as_mem_db_mut() { + let root = transaction.apply_to(&mut db); + state.set_root(root); + } else if let Some(mut db) = state.backend_storage_mut().as_prefixed_mem_db_mut() { + let root = transaction.apply_to(&mut db); + state.set_root(root); + } else { + unreachable!() } - let mut record = self.record.take(); - record.extend(keys); - self.record.set(record); - db.write(db_transaction) - .map_err(|_| String::from("Error committing transaction"))?; - self.root.set(storage_root); - self.db.set(Some(db)); // Track DB Writes main_storage_changes.iter().for_each(|(key, _)| { @@ -510,29 +474,12 @@ impl StateBackend for BenchmarkingState { self.add_write_key(Some(child_storage_key), key); }) }); - } else { - return Err("Trying to commit to a closed db".into()) } - self.reopen() + Ok(()) } fn wipe(&self) -> Result<(), Self::Error> { // Restore to genesis - let record = self.record.take(); - if let Some(db) = self.db.take() { - let mut db_transaction = DBTransaction::new(); - for key in record { - match self.genesis.get(&key) { - Some((v, _)) => db_transaction.put(0, &key, v), - None => db_transaction.delete(0, &key), - } - } - db.write(db_transaction) - .map_err(|_| String::from("Error committing transaction"))?; - self.db.set(Some(db)); - } - - self.root.set(self.genesis_root); self.reopen()?; self.wipe_tracker(); Ok(()) @@ -631,16 +578,22 @@ impl StateBackend for BenchmarkingState { log::debug!(target: "benchmark", "Some proof size: {}", &proof_size); proof_size } else { + let root = if let Some(state) = self.state.borrow().as_ref().map(|s| *s.root()) { + state + } else { + self.genesis_root + }; + if let Some(size) = proof.encoded_compact_size::(proof_recorder_root) { size as u32 - } else if proof_recorder_root == self.root.get() { + } else if proof_recorder_root == root { log::debug!(target: "benchmark", "No changes - no proof"); 0 } else { panic!( "proof rec root {:?}, root {:?}, genesis {:?}, rec_len {:?}", self.proof_recorder_root.get(), - self.root.get(), + root, self.genesis_root, proof_size, ); @@ -660,7 +613,7 @@ impl std::fmt::Debug for BenchmarkingState { mod test { use crate::bench::BenchmarkingState; use sp_runtime::traits::HashingFor; - use sp_state_machine::backend::Backend as _; + use sp_state_machine::{backend::Backend as _, BackendTransaction}; fn hex(hex: &str) -> Vec { array_bytes::hex2bytes(hex).unwrap() @@ -710,8 +663,7 @@ mod test { bench_state .commit( - Default::default(), - 
Default::default(), + BackendTransaction::unchanged(Default::default()), vec![("foo".as_bytes().to_vec(), None)], vec![("child1".as_bytes().to_vec(), vec![("foo".as_bytes().to_vec(), None)])], ) diff --git a/substrate/client/db/src/children.rs b/substrate/client/db/src/children.rs index 067137653718..2ae793f77a75 100644 --- a/substrate/client/db/src/children.rs +++ b/substrate/client/db/src/children.rs @@ -96,14 +96,14 @@ mod tests { let mut children1 = Vec::new(); children1.push(1_3); children1.push(1_5); - write_children(&mut tx, 0, PREFIX, 1_1, children1); + write_children(&mut tx, 0, PREFIX, 1_1, children1.clone()); let mut children2 = Vec::new(); children2.push(1_4); children2.push(1_6); write_children(&mut tx, 0, PREFIX, 1_2, children2); - db.commit(tx.clone()).unwrap(); + db.commit(tx).unwrap(); let r1: Vec = read_children(&*db, 0, PREFIX, 1_1).expect("(1) Getting r1 failed"); let r2: Vec = read_children(&*db, 0, PREFIX, 1_2).expect("(1) Getting r2 failed"); @@ -111,6 +111,8 @@ mod tests { assert_eq!(r1, vec![1_3, 1_5]); assert_eq!(r2, vec![1_4, 1_6]); + let mut tx = Transaction::new(); + write_children(&mut tx, 0, PREFIX, 1_1, children1); remove_children(&mut tx, 0, PREFIX, 1_2); db.commit(tx).unwrap(); diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 0faa90dfc4f9..a36c3b39ae59 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -55,10 +55,9 @@ use crate::{ pinned_blocks_cache::PinnedBlocksCache, record_stats_state::RecordStatsState, stats::StateUsageStats, - utils::{meta_keys, read_db, read_meta, DatabaseType, Meta}, + utils::{meta_keys, read_db, read_meta, Meta}, }; use codec::{Decode, Encode}; -use hash_db::Prefix; use sc_client_api::{ backend::NewBlockState, leaves::{FinalizationOutcome, LeafSet}, @@ -75,7 +74,6 @@ use sp_core::{ offchain::OffchainOverlayedChange, storage::{well_known_keys, ChildInfo}, }; -use sp_database::Transaction; use sp_runtime::{ generic::BlockId, traits::{ @@ -86,29 +84,27 @@ use sp_runtime::{ }; use sp_state_machine::{ backend::{AsTrieBackend, Backend as StateBackend}, - BackendTransaction, ChildStorageCollection, DBValue, IndexOperation, IterArgs, - OffchainChangesCollection, StateMachineStats, StorageCollection, StorageIterator, StorageKey, - StorageValue, UsageInfo as StateUsageInfo, + BackendTransaction, ChildStorageCollection, DBLocation, DBValue, IndexOperation, IterArgs, + NodeDB, OffchainChangesCollection, StateMachineStats, StorageCollection, StorageIterator, + StorageKey, StorageValue, UsageInfo as StateUsageInfo, }; -use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, MerkleValue, PrefixedMemoryDB}; +use sp_trie::{cache::SharedTrieCache, prefixed_key, ChildChangeset, MemoryDB, MerkleValue}; +use trie_db::node_db::Prefix; // Re-export the Database trait so that one can pass an implementation of it. pub use sc_state_db::PruningMode; -pub use sp_database::Database; +pub use sp_database::{Database, StateCapabilities, Transaction}; +pub use utils::open_database; pub use bench::BenchmarkingState; const CACHE_HEADERS: usize = 8; /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. -pub type DbState = sp_state_machine::TrieBackend>, H>; +pub type DbState = sp_state_machine::TrieBackend; /// Builder for [`DbState`]. -pub type DbStateBuilder = - sp_state_machine::TrieBackendBuilder>, Hasher>; - -/// Length of a [`DbHash`]. 
-const DB_HASH_LEN: usize = 32; +pub type DbStateBuilder = sp_state_machine::TrieBackendBuilder; /// Hash type that this backend uses for the database. pub type DbHash = sp_core::H256; @@ -133,14 +129,14 @@ enum DbExtrinsic { /// until this structure is dropped. pub struct RefTrackingState { state: DbState>, - storage: Arc>, + storage: StorageDb, parent_hash: Option, } impl RefTrackingState { fn new( state: DbState>, - storage: Arc>, + storage: StorageDb, parent_hash: Option, ) -> Self { RefTrackingState { state, parent_hash, storage } @@ -150,7 +146,9 @@ impl RefTrackingState { impl Drop for RefTrackingState { fn drop(&mut self) { if let Some(hash) = &self.parent_hash { - self.storage.state_db.unpin(hash); + if let Some(state_db) = &self.storage.state_db { + state_db.unpin(hash); + } } } } @@ -188,8 +186,6 @@ impl StorageIterator> for RawIter { impl StateBackend> for RefTrackingState { type Error = > as StateBackend>>::Error; - type TrieBackendStorage = - > as StateBackend>>::TrieBackendStorage; type RawIter = RawIter; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -257,9 +253,9 @@ impl StateBackend> for RefTrackingState { fn storage_root<'a>( &self, - delta: impl Iterator)>, + delta: impl Iterator, Option>)>, state_version: StateVersion, - ) -> (B::Hash, BackendTransaction>) { + ) -> BackendTransaction { self.state.storage_root(delta, state_version) } @@ -268,7 +264,7 @@ impl StateBackend> for RefTrackingState { child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (B::Hash, bool, BackendTransaction>) { + ) -> (BackendTransaction, bool) { self.state.child_storage_root(child_info, delta, state_version) } @@ -286,13 +282,12 @@ impl StateBackend> for RefTrackingState { } impl AsTrieBackend> for RefTrackingState { - type TrieBackendStorage = - > as StateBackend>>::TrieBackendStorage; + fn as_trie_backend(&self) -> &sp_state_machine::TrieBackend> { + self.state.as_trie_backend() + } - fn as_trie_backend( - &self, - ) -> &sp_state_machine::TrieBackend> { - &self.state.as_trie_backend() + fn as_trie_backend_mut(&mut self) -> &mut sp_state_machine::TrieBackend> { + self.state.as_trie_backend_mut() } } @@ -359,6 +354,8 @@ pub enum DatabaseSource { ParityDb { /// Path to the database. path: PathBuf, + /// Multi tree state use. + multi_tree: bool, }, /// Use a custom already-open database. @@ -382,7 +379,7 @@ impl DatabaseSource { DatabaseSource::Auto { paritydb_path, .. } => Some(paritydb_path), #[cfg(feature = "rocksdb")] DatabaseSource::RocksDb { path, .. } => Some(path), - DatabaseSource::ParityDb { path } => Some(path), + DatabaseSource::ParityDb { path, .. } => Some(path), DatabaseSource::Custom { .. } => None, } } @@ -390,16 +387,16 @@ impl DatabaseSource { /// Set path for databases that are stored on disk. pub fn set_path(&mut self, p: &Path) -> bool { match self { - DatabaseSource::Auto { ref mut paritydb_path, .. } => { + DatabaseSource::Auto { paritydb_path, .. } => { *paritydb_path = p.into(); true }, #[cfg(feature = "rocksdb")] - DatabaseSource::RocksDb { ref mut path, .. } => { + DatabaseSource::RocksDb { path, .. } => { *path = p.into(); true }, - DatabaseSource::ParityDb { ref mut path } => { + DatabaseSource::ParityDb { path, .. 
} => { *path = p.into(); true }, @@ -446,9 +443,9 @@ struct PendingBlock { leaf_state: NewBlockState, } -// wrapper that implements trait required for state_db +/// Wrapper that implements the trait required by state_db. #[derive(Clone)] -struct StateMetaDb(Arc>); +pub struct StateMetaDb(Arc>); impl sc_state_db::MetaDb for StateMetaDb { type Error = sp_database::error::DatabaseError; @@ -458,6 +455,17 @@ impl sc_state_db::MetaDb for StateMetaDb { } } +struct StateNodeDb<'a>(&'a dyn Database); + +impl<'a> sc_state_db::NodeDb for StateNodeDb<'a> { + type Error = io::Error; + type Key = [u8]; + + fn get(&self, key: &[u8]) -> Result, Self::Error> { + Ok(self.0.get(columns::STATE, key)) + } +} + struct MetaUpdate { pub hash: Block::Hash, pub number: NumberFor, @@ -845,7 +853,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { old_state: RecordStatsState, Block>, - db_updates: PrefixedMemoryDB>, + db_updates: BackendTransaction, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, offchain_storage_updates: OffchainChangesCollection, @@ -900,13 +908,14 @@ impl BlockImportOperation { ) }); - let (root, transaction) = self.old_state.full_storage_root( + let transaction = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| (&k[..], Some(&v[..]))), child_delta, state_version, ); self.db_updates = transaction; + let root = self.db_updates.root_hash(); Ok(root) } } @@ -934,10 +943,7 @@ impl sc_client_api::backend::BlockImportOperation Ok(()) } - fn update_db_storage( - &mut self, - update: PrefixedMemoryDB>, - ) -> ClientResult<()> { + fn update_db_storage(&mut self, update: BackendTransaction) -> ClientResult<()> { self.db_updates = update; Ok(()) } @@ -1010,48 +1016,138 @@ impl sc_client_api::backend::BlockImportOperation } } -struct StorageDb { +#[derive(Clone)] +/// State storage. +pub struct StorageDb { + /// Database backend. pub db: Arc>, - pub state_db: StateDb, StateMetaDb>, - prefix_keys: bool, + /// Pruning manager. + pub state_db: Option, StateMetaDb>>>, } -impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - if self.prefix_keys { - let key = prefixed_key::>(key, prefix); - self.state_db.get(&key, self) +impl StorageDb { + fn contains_root(&self, root: &Block::Hash) -> bool { + match self.db.state_capabilities() { + StateCapabilities::TreeColumn => + self.db.get_node(columns::STATE, root.as_ref(), Default::default()).is_some(), + StateCapabilities::RefCounted => self.db.get(columns::STATE, root.as_ref()).is_some(), + StateCapabilities::None => { + let key = prefixed_key::>(root, Default::default()); + self.db.get(columns::STATE, key.as_ref()).is_some() + }, + } + } + + /// Some databases have no implicit handling of the empty trie node, so in + /// certain cases it is necessary to add it manually. + pub fn insert_empty_trie_node(&mut self) { + use trie_db::NodeCodec; + type C = sp_trie::NodeCodec>; + let mut transaction = Transaction::default(); + let empty_hash = C::::hashed_null_node(); + let empty_data = C::::empty_node().to_vec(); + let state_capabilities = self.db.state_capabilities(); + if state_capabilities == StateCapabilities::TreeColumn { + let commit = trie_db::triedbmut::Changeset::new_empty::>(); + apply_tree_commit::>(commit, state_capabilities, &mut transaction); } else { - self.state_db.get(key.as_ref(), self) + // cannot use apply_tree_commit, as the null node would not be included in the temp memdb.
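+ // (For the RefCounted/None capabilities, apply_tree_commit folds the changeset
+ // into a temporary memory db and drains it; an empty-trie changeset contributes
+ // no nodes there, so the null node is written to the STATE column directly.)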
+ transaction.set_from_vec(columns::STATE, empty_hash.as_ref(), empty_data); } - .map_err(|e| format!("Database backend error: {:?}", e)) + + self.db.commit(transaction).expect("Failed to write transaction"); } } -impl sc_state_db::NodeDb for StorageDb { - type Error = io::Error; - type Key = [u8]; +impl sp_state_machine::AsDB> for StorageDb { + fn as_node_db(&self) -> &dyn NodeDB, DBValue, DBLocation> { + self + } +} - fn get(&self, key: &[u8]) -> Result>, Self::Error> { - Ok(self.db.get(columns::STATE, key)) +impl sp_state_machine::NodeDB, DBValue, DBLocation> + for StorageDb +{ + fn get( + &self, + key: &Block::Hash, + prefix: Prefix, + location: DBLocation, + ) -> Option<(DBValue, Vec)> { + if let Some(state_db) = &self.state_db { + if self.db.state_capabilities().needs_key_prefixing() { + let key = prefixed_key::>(key, prefix); + state_db.get(&key, &StateNodeDb(&*self.db)) + } else { + state_db.get(key.as_ref(), &StateNodeDb(&*self.db)) + } + .unwrap_or_else(|e| { + warn!("Database backend error: {:?}", e); + None + }) + .map(|value| (value, Default::default())) + } else { + // Having no state db does not always mean multi-tree + // (e.g. benchmarks bypass it). + match self.db.state_capabilities() { + StateCapabilities::TreeColumn => { + if let Some((v, locs)) = + self.db.get_node(columns::STATE, key.as_ref(), location) + { + if cfg!(debug_assertions) { + let hash = sp_core::blake2_256(&v); + assert_eq!( + key.as_ref(), + hash.as_slice(), + "Bad hash at location {location}, key={:?}, hash={:?}", + sp_core::hexdisplay::HexDisplay::from(&key.as_ref()), + sp_core::hexdisplay::HexDisplay::from(&hash), + ); + } + Some((v, locs)) + } else { + warn!("Missing loc {:?}", location); + None + } + }, + + StateCapabilities::RefCounted => + self.db.get(columns::STATE, key.as_ref()).map(|v| (v, Default::default())), + StateCapabilities::None => { + let key = prefixed_key::>(key, prefix); + self.db.get(columns::STATE, key.as_ref()).map(|v| (v, Default::default())) + }, + } + } + } } +#[derive(Clone)] struct DbGenesisStorage { root: Block::Hash, - storage: PrefixedMemoryDB>, + storage: Arc>>, } impl DbGenesisStorage { - pub fn new(root: Block::Hash, storage: PrefixedMemoryDB>) -> Self { - DbGenesisStorage { root, storage } + pub fn new(root: Block::Hash, storage: MemoryDB>) -> Self { + DbGenesisStorage { root, storage: Arc::new(storage) } + } +} + +impl NodeDB, DBValue, DBLocation> for DbGenesisStorage { + fn get( + &self, + key: &Block::Hash, + prefix: Prefix, + location: DBLocation, + ) -> Option<(DBValue, Vec)> { + NodeDB::get(&*self.storage, key, prefix, location) } } -impl sp_state_machine::Storage> for DbGenesisStorage { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - use hash_db::HashDB; - Ok(self.storage.get(key, prefix)) +impl sp_state_machine::AsDB> for DbGenesisStorage { + fn as_node_db(&self) -> &dyn NodeDB, DBValue, DBLocation> { + self } } @@ -1059,18 +1155,26 @@ struct EmptyStorage(pub Block::Hash); impl EmptyStorage { pub fn new() -> Self { - let mut root = Block::Hash::default(); let mut mdb = MemoryDB::>::default(); // both triedbmut are the same on empty storage.
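// (That is, the V0 and V1 layouts share the same empty root, so deriving it from
// the V1 builder's committed changeset is layout-agnostic.)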
- sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root) - .build(); + let root = sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb) + .build() + .commit() + .root_hash(); EmptyStorage(root) } } -impl sp_state_machine::Storage> for EmptyStorage { - fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { - Ok(None) +impl sp_state_machine::NodeDB, DBValue, DBLocation> + for EmptyStorage +{ + fn get( + &self, + _key: &Block::Hash, + _prefix: Prefix, + _location: DBLocation, + ) -> Option<(DBValue, Vec)> { + None } } @@ -1114,22 +1218,105 @@ impl FrozenForDuration { } } +/// Apply trie commit to the database transaction. +pub fn apply_tree_commit( + commit: BackendTransaction, + state_capabilities: StateCapabilities, + tx: &mut Transaction, +) { + fn convert(node: sp_trie::Changeset) -> sp_database::NodeRef { + match node { + sp_trie::Changeset::Existing(node) => sp_database::NodeRef::Existing(node.location), + sp_trie::Changeset::New(node) => sp_database::NodeRef::New(sp_database::NewNode { + data: node.data, + children: node.children.into_iter().map(|c| convert::(c)).collect(), + }), + } + } + + match state_capabilities { + StateCapabilities::TreeColumn => { + let hash = commit.root_hash(); + match commit { + sp_trie::Changeset::Existing(node) => { + tx.reference_tree(columns::STATE, DbHash::from_slice(node.hash.as_ref())); + }, + new_node @ sp_trie::Changeset::New(_) => { + if let sp_database::NodeRef::New(n) = convert::(new_node) { + tx.insert_tree(columns::STATE, DbHash::from_slice(hash.as_ref()), n); + } + }, + } + }, + StateCapabilities::RefCounted => { + let mut memdb = sp_trie::MemoryDB::::default(); + commit.apply_to(&mut memdb); + + for (key, (val, rc)) in memdb.drain() { + if rc > 0 { + if rc == 1 { + tx.set_from_vec(columns::STATE, key.as_ref(), val.to_vec()); + } else { + tx.set_from_vec(columns::STATE, key.as_ref(), val.to_vec()); + for _ in 0..rc - 1 { + tx.set_from_vec(columns::STATE, key.as_ref(), val.to_vec()); + } + } + } else if rc < 0 { + if rc == -1 { + tx.remove(columns::STATE, key.as_ref()); + } else { + for _ in 0..-rc { + tx.remove(columns::STATE, key.as_ref()); + } + } + } + } + }, + StateCapabilities::None => { + let mut memdb = sp_trie::PrefixedMemoryDB::::default(); + commit.apply_to(&mut memdb); + + for (key, (val, rc)) in memdb.drain() { + if rc > 0 { + if rc == 1 { + tx.set_from_vec(columns::STATE, key.as_ref(), val.to_vec()); + } else { + tx.set_from_vec(columns::STATE, key.as_ref(), val.to_vec()); + for _ in 0..rc - 1 { + tx.set_from_vec(columns::STATE, key.as_ref(), val.to_vec()); + } + } + } else if rc < 0 { + if rc == -1 { + tx.remove(columns::STATE, key.as_ref()); + } else { + for _ in 0..-rc { + tx.remove(columns::STATE, key.as_ref()); + } + } + } + } + }, + } +} + /// Disk backend. /// /// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all /// blocks. Otherwise, trie nodes are kept only from some recent blocks. 
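/// With a multi-tree database (`StateCapabilities::TreeColumn`) no `sc-state-db`
/// journal is kept (`state_db` is `None`) and pruning is expressed through
/// `reference_tree`/`release_tree` on the state column; the journalled path remains
/// for ref-counted and plain key-value backends.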
pub struct Backend { - storage: Arc>, + storage: StorageDb, offchain_storage: offchain::LocalStorage, blockchain: BlockchainDb, canonicalization_delay: u64, import_lock: Arc>, - is_archive: bool, + state_pruning: PruningMode, blocks_pruning: BlocksPruning, io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, state_usage: Arc, - genesis_state: RwLock>>>, - shared_trie_cache: Option>>, + genesis_state: RwLock>>, + shared_trie_cache: Option, DBLocation>>, } impl Backend { @@ -1140,17 +1327,17 @@ impl Backend { use utils::OpenDbError; let db_source = &db_config.source; + let archive = db_config.state_pruning.as_ref().map_or(false, |p| p.is_archive()); - let (needs_init, db) = - match crate::utils::open_database::(db_source, DatabaseType::Full, false) { - Ok(db) => (false, db), - Err(OpenDbError::DoesNotExist) => { - let db = - crate::utils::open_database::(db_source, DatabaseType::Full, true)?; - (true, db) - }, - Err(as_is) => return Err(as_is.into()), - }; + let (needs_init, db) = match crate::utils::open_database::(db_source, false, archive) + { + Ok(db) => (false, db), + Err(OpenDbError::DoesNotExist) => { + let db = crate::utils::open_database::(db_source, true, archive)?; + (true, db) + }, + Err(as_is) => return Err(as_is.into()), + }; Self::from_database(db as Arc<_>, canonicalization_delay, &db_config, needs_init) } @@ -1204,7 +1391,7 @@ impl Backend { /// /// Should only be needed for benchmarking. #[cfg(any(feature = "runtime-benchmarks"))] - pub fn expose_storage(&self) -> Arc>> { + pub fn expose_storage(&self) -> StorageDb { self.storage.clone() } @@ -1214,38 +1401,44 @@ impl Backend { config: &DatabaseSettings, should_init: bool, ) -> ClientResult { - let mut db_init_transaction = Transaction::new(); - - let requested_state_pruning = config.state_pruning.clone(); - let state_meta_db = StateMetaDb(db.clone()); + let requested_mode = config.state_pruning.clone(); let map_e = sp_blockchain::Error::from_state_db; + let state_meta_db = StateMetaDb(db.clone()); + let (state_db, pruning_mode, init_commit) = match db.state_capabilities() { + StateCapabilities::TreeColumn => { + let (init_commit, actual_mode) = StateDb::::open_meta( + &state_meta_db, + requested_mode, + should_init, + ) + .map_err(map_e)?; + (None, actual_mode, init_commit) + }, + StateCapabilities::RefCounted | StateCapabilities::None => { + let (init_commit, state_db) = + StateDb::open(state_meta_db, requested_mode, true, should_init) + .map_err(map_e)?; + let state_pruning_used = state_db.pruning_mode(); + (Some(Arc::new(state_db)), state_pruning_used, init_commit) + }, + }; - let (state_db_init_commit_set, state_db) = StateDb::open( - state_meta_db, - requested_state_pruning, - !db.supports_ref_counting(), - should_init, - ) - .map_err(map_e)?; - - apply_state_commit(&mut db_init_transaction, state_db_init_commit_set); + let mut db_init_transaction = Transaction::new(); + apply_state_commit(&mut db_init_transaction, init_commit); - let state_pruning_used = state_db.pruning_mode(); - let is_archive_pruning = state_pruning_used.is_archive(); let blockchain = BlockchainDb::new(db.clone())?; - let storage_db = - StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() }; + let storage_db = StorageDb { db: db.clone(), state_db }; let offchain_storage = offchain::LocalStorage::new(db.clone()); let backend = Backend { - storage: Arc::new(storage_db), + storage: storage_db, offchain_storage, blockchain, canonicalization_delay, import_lock: Default::default(), - is_archive: is_archive_pruning, + 
state_pruning: pruning_mode, io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), state_usage: Arc::new(StateUsageStats::new()), blocks_pruning: config.blocks_pruning, @@ -1408,46 +1601,48 @@ impl Backend { &self, transaction: &mut Transaction, ) -> ClientResult<()> { - let best_canonical = match self.storage.state_db.last_canonicalized() { - LastCanonicalized::None => 0, - LastCanonicalized::Block(b) => b, - // Nothing needs to be done when canonicalization is not happening. - LastCanonicalized::NotCanonicalizing => return Ok(()), - }; + if let Some(state_db) = &self.storage.state_db { + let best_canonical = match state_db.last_canonicalized() { + LastCanonicalized::None => 0, + LastCanonicalized::Block(b) => b, + // Nothing needs to be done when canonicalization is not happening. + LastCanonicalized::NotCanonicalizing => return Ok(()), + }; - let info = self.blockchain.info(); - let best_number: u64 = self.blockchain.info().best_number.saturated_into(); + let info = self.blockchain.info(); + let best_number: u64 = self.blockchain.info().best_number.saturated_into(); - for to_canonicalize in - best_canonical + 1..=best_number.saturating_sub(self.canonicalization_delay) - { - let hash_to_canonicalize = sc_client_api::blockchain::HeaderBackend::hash( - &self.blockchain, - to_canonicalize.saturated_into(), - )? - .ok_or_else(|| { - let best_hash = info.best_hash; - - sp_blockchain::Error::Backend(format!( - "Can't canonicalize missing block number #{to_canonicalize} when for best block {best_hash:?} (#{best_number})", - )) - })?; - - if !sc_client_api::Backend::have_state_at( - self, - hash_to_canonicalize, - to_canonicalize.saturated_into(), - ) { - return Ok(()) - } + for to_canonicalize in + best_canonical + 1..=best_number.saturating_sub(self.canonicalization_delay) + { + let hash_to_canonicalize = sc_client_api::blockchain::HeaderBackend::hash( + &self.blockchain, + to_canonicalize.saturated_into(), + )? + .ok_or_else(|| { + let best_hash = info.best_hash; + + sp_blockchain::Error::Backend(format!( + "Can't canonicalize missing block number #{to_canonicalize} when for best block {best_hash:?} (#{best_number})", + )) + })?; - trace!(target: "db", "Canonicalize block #{} ({:?})", to_canonicalize, hash_to_canonicalize); - let commit = self.storage.state_db.canonicalize_block(&hash_to_canonicalize).map_err( - sp_blockchain::Error::from_state_db::< - sc_state_db::Error, - >, - )?; - apply_state_commit(transaction, commit); + if !sc_client_api::Backend::have_state_at( + self, + hash_to_canonicalize, + to_canonicalize.saturated_into(), + ) { + return Ok(()) + } + + trace!(target: "db", "Canonicalize block #{} ({:?})", to_canonicalize, hash_to_canonicalize); + let commit = state_db.canonicalize_block(&hash_to_canonicalize).map_err( + sp_blockchain::Error::from_state_db::< + sc_state_db::Error, + >, + )?; + apply_state_commit(transaction, commit); + } } Ok(()) @@ -1536,47 +1731,122 @@ impl Backend { // When we don't want to commit the genesis state, we still preserve it in // memory to bootstrap consensus. It is queried for an initial list of // authorities, etc. 
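// (The changeset is materialised into a plain `MemoryDB` below so that
// `DbGenesisStorage` can serve trie-node reads before anything has been committed
// to the backing database.)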
- *self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new( + let mut genesis_state = MemoryDB::default(); + operation.db_updates.apply_to(&mut genesis_state); + *self.genesis_state.write() = Some(DbGenesisStorage::new( *pending_block.header.state_root(), - operation.db_updates.clone(), - ))); + genesis_state, + )); } } let finalized = if operation.commit_state { - let mut changeset: sc_state_db::ChangeSet> = - sc_state_db::ChangeSet::default(); - let mut ops: u64 = 0; - let mut bytes: u64 = 0; - let mut removal: u64 = 0; - let mut bytes_removal: u64 = 0; - for (mut key, (val, rc)) in operation.db_updates.drain() { - self.storage.db.sanitize_key(&mut key); - if rc > 0 { - ops += 1; - bytes += key.len() as u64 + val.len() as u64; - if rc == 1 { - changeset.inserted.push((key, val.to_vec())); - } else { - changeset.inserted.push((key.clone(), val.to_vec())); - for _ in 0..rc - 1 { - changeset.inserted.push((key.clone(), Default::default())); + let number_u64 = number.saturated_into::(); + let trie_commit = operation.db_updates; + let mut ops = 0; + let mut bytes = 0; + if let Some(state_db) = &self.storage.state_db { + let mut changeset: sc_state_db::ChangeSet> = + sc_state_db::ChangeSet::default(); + + let mut removal: u64 = 0; + let mut bytes_removal: u64 = 0; + + if self.storage.db.state_capabilities().needs_key_prefixing() { + let mut memdb = sp_trie::PrefixedMemoryDB::>::default(); + trie_commit.apply_to(&mut memdb); + for (key, (val, rc)) in memdb.drain() { + if rc > 0 { + ops += 1; + bytes += key.len() as u64 + val.len() as u64; + if rc == 1 { + changeset.inserted.push((key.clone(), val.to_vec())); + } else { + changeset.inserted.push((key.clone(), val.to_vec())); + for _ in 0..rc - 1 { + changeset.inserted.push((key.clone(), Default::default())); + } + } + } else if rc < 0 { + removal += 1; + bytes_removal += key.len() as u64; + if rc == -1 { + changeset.deleted.push(key.to_vec()); + } else { + for _ in 0..-rc { + changeset.deleted.push(key.to_vec()); + } + } } } - } else if rc < 0 { - removal += 1; - bytes_removal += key.len() as u64; - if rc == -1 { - changeset.deleted.push(key); - } else { - for _ in 0..-rc { - changeset.deleted.push(key.clone()); + } else { + let mut memdb = sp_trie::MemoryDB::>::default(); + trie_commit.apply_to(&mut memdb); + for (key, (val, rc)) in memdb.drain() { + let key = key.as_ref(); + if rc > 0 { + ops += 1; + bytes += key.len() as u64 + val.len() as u64; + if rc == 1 { + changeset.inserted.push((key.to_vec(), val.to_vec())); + } else { + changeset.inserted.push((key.to_vec(), val.to_vec())); + for _ in 0..rc - 1 { + changeset.inserted.push((key.to_vec(), val.to_vec())); + } + } + } else if rc < 0 { + removal += 1; + bytes_removal += key.len() as u64; + if rc == -1 { + changeset.deleted.push(key.to_vec()); + } else { + for _ in 0..-rc { + changeset.deleted.push(key.to_vec()); + } + } } } } + + self.state_usage.tally_writes_nodes(ops, bytes); + self.state_usage.tally_removed_nodes(removal, bytes_removal); + let commit = state_db + .insert_block( + &hash, + number_u64, + pending_block.header.parent_hash(), + changeset, + ) + .map_err(|e: sc_state_db::Error| { + sp_blockchain::Error::from_state_db(e) + })?; + apply_state_commit(&mut transaction, commit); + + if number <= last_finalized_num { + // Canonicalize in the db when re-importing existing blocks with state. 
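+ // (This journalled branch runs only when `state_db` is `Some`; multi-tree
+ // backends skip it and write through `apply_tree_commit` in the `else` arm
+ // below.)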
+ let commit = state_db.canonicalize_block(&hash).map_err( + sp_blockchain::Error::from_state_db::< + sc_state_db::Error, + >, + )?; + apply_state_commit(&mut transaction, commit); + meta_updates.push(MetaUpdate { + hash, + number, + is_best: false, + is_finalized: true, + with_state: true, + }); + } + } else { + // Just write changes to the db. + apply_tree_commit::>( + trie_commit, + self.storage.db.state_capabilities(), + &mut transaction, + ); } - self.state_usage.tally_writes_nodes(ops, bytes); - self.state_usage.tally_removed_nodes(removal, bytes_removal); let mut ops: u64 = 0; let mut bytes: u64 = 0; @@ -1592,31 +1862,6 @@ impl Backend { } } self.state_usage.tally_writes(ops, bytes); - let number_u64 = number.saturated_into::(); - let commit = self - .storage - .state_db - .insert_block(&hash, number_u64, pending_block.header.parent_hash(), changeset) - .map_err(|e: sc_state_db::Error| { - sp_blockchain::Error::from_state_db(e) - })?; - apply_state_commit(&mut transaction, commit); - if number <= last_finalized_num { - // Canonicalize in the db when re-importing existing blocks with state. - let commit = self.storage.state_db.canonicalize_block(&hash).map_err( - sp_blockchain::Error::from_state_db::< - sc_state_db::Error, - >, - )?; - apply_state_commit(&mut transaction, commit); - meta_updates.push(MetaUpdate { - hash, - number, - is_best: false, - is_finalized: true, - with_state: true, - }); - } // Check if need to finalize. Genesis is always finalized instantly. let finalized = number_u64 == 0 || pending_block.leaf_state.is_final(); @@ -1798,19 +2043,23 @@ impl Backend { } transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - let requires_canonicalization = match self.storage.state_db.last_canonicalized() { - LastCanonicalized::None => true, - LastCanonicalized::Block(b) => f_num.saturated_into::() > b, - LastCanonicalized::NotCanonicalizing => false, - }; + if let Some(state_db) = &self.storage.state_db { + let requires_canonicalization = match state_db.last_canonicalized() { + LastCanonicalized::None => true, + LastCanonicalized::Block(b) => f_num.saturated_into::() > b, + LastCanonicalized::NotCanonicalizing => false, + }; - if requires_canonicalization && sc_client_api::Backend::have_state_at(self, f_hash, f_num) { - let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err( - sp_blockchain::Error::from_state_db::< - sc_state_db::Error, - >, - )?; - apply_state_commit(transaction, commit); + if requires_canonicalization && + sc_client_api::Backend::have_state_at(self, f_hash, f_num) + { + let commit = state_db.canonicalize_block(&f_hash).map_err( + sp_blockchain::Error::from_state_db::< + sc_state_db::Error, + >, + )?; + apply_state_commit(transaction, commit); + } } let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); @@ -1843,8 +2092,6 @@ impl Backend { // Before we prune a block, check if it is pinned if let Some(hash) = self.blockchain.hash(number)? { - self.blockchain.insert_persisted_body_if_pinned(hash)?; - // If the block was finalized in this transaction, it will not be in the db // yet. 
 					if let Some(justification) =
@@ -1854,14 +2101,31 @@
 					} else {
 						self.blockchain.insert_persisted_justifications_if_pinned(hash)?;
 					}
-				};
-				self.prune_block(transaction, BlockId::<Block>::number(number))?;
+					self.prune_block(transaction, hash, true, false)?;
+				};
 			}
-			self.prune_displaced_branches(transaction, finalized_hash, displaced)?;
+			self.prune_displaced_branches(transaction, finalized_hash, displaced, true, false)?;
 		},
 		BlocksPruning::KeepFinalized => {
-			self.prune_displaced_branches(transaction, finalized_hash, displaced)?;
+			self.prune_displaced_branches(transaction, finalized_hash, displaced, true, false)?;
+		},
+	}
+	match self.state_pruning {
+		PruningMode::ArchiveAll => {},
+		PruningMode::Constrained(sc_state_db::Constraints { max_blocks }) => {
+			// Always keep the last finalized block
+			let keep = std::cmp::max(max_blocks.unwrap_or_default(), 1);
+			if finalized_number >= keep.into() {
+				let number = finalized_number.saturating_sub(keep.into());
+				if let Some(hash) = self.blockchain.hash(number)? {
+					self.prune_block(transaction, hash, false, true)?;
+				}
+			}
+			self.prune_displaced_branches(transaction, finalized_hash, displaced, false, true)?;
+		},
+		PruningMode::ArchiveCanonical => {
+			self.prune_displaced_branches(transaction, finalized_hash, displaced, false, true)?;
 		},
 	}
 	Ok(())
@@ -1872,14 +2136,15 @@ impl<Block: BlockT> Backend<Block> {
 		transaction: &mut Transaction<DbHash>,
 		finalized: Block::Hash,
 		displaced: &FinalizationOutcome<Block::Hash, NumberFor<Block>>,
+		clean_body: bool,
+		clean_state: bool,
 	) -> ClientResult<()> {
 		// Discard all blocks from displaced branches
 		for h in displaced.leaves() {
 			match sp_blockchain::tree_route(&self.blockchain, *h, finalized) {
 				Ok(tree_route) =>
 					for r in tree_route.retracted() {
-						self.blockchain.insert_persisted_body_if_pinned(r.hash)?;
-						self.prune_block(transaction, BlockId::<Block>::hash(r.hash))?;
+						self.prune_block(transaction, r.hash, clean_body, clean_state)?;
 					},
 				Err(sp_blockchain::Error::UnknownBlock(_)) => {
 					// Sometimes routes can't be calculated. E.g. after warp sync.
@@ -1893,45 +2158,62 @@ impl<Block: BlockT> Backend<Block> {
 	fn prune_block(
 		&self,
 		transaction: &mut Transaction<DbHash>,
-		id: BlockId<Block>,
+		hash: Block::Hash,
+		clean_body: bool,
+		clean_state: bool,
 	) -> ClientResult<()> {
-		debug!(target: "db", "Removing block #{}", id);
-		utils::remove_from_db(
-			transaction,
-			&*self.storage.db,
-			columns::KEY_LOOKUP,
-			columns::BODY,
-			id,
-		)?;
-		utils::remove_from_db(
-			transaction,
-			&*self.storage.db,
-			columns::KEY_LOOKUP,
-			columns::JUSTIFICATIONS,
-			id,
-		)?;
-		if let Some(index) =
-			read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)?
-		{
+		let id = BlockId::<Block>::hash(hash);
+		debug!(target: "db", "Removing block {}. Body:{}, State:{}", hash, clean_body, clean_state);
+		if clean_state {
+			match self.blockchain.header_metadata(hash) {
+				Ok(hdr) => {
+					transaction
+						.release_tree(columns::STATE, DbHash::from_slice(hdr.state_root.as_ref()));
+				},
+				Err(e) => {
+					log::debug!(target: "db", "Failed to get header metadata for block #{}: {:?}", id, e)
+				},
+			}
+		}
+		if clean_body {
+			self.blockchain.insert_persisted_body_if_pinned(hash)?;
 			utils::remove_from_db(
 				transaction,
 				&*self.storage.db,
 				columns::KEY_LOOKUP,
-				columns::BODY_INDEX,
+				columns::BODY,
 				id,
 			)?;
-			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
-				Ok(index) =>
-					for ex in index {
-						if let DbExtrinsic::Indexed { hash, .. } = ex {
-							transaction.release(columns::TRANSACTION, hash);
-						}
-					},
-				Err(err) =>
-					return Err(sp_blockchain::Error::Backend(format!(
-						"Error decoding body list: {}",
-						err
-					))),
+			utils::remove_from_db(
+				transaction,
+				&*self.storage.db,
+				columns::KEY_LOOKUP,
+				columns::JUSTIFICATIONS,
+				id,
+			)?;
+			if let Some(index) =
+				read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)?
+			{
+				utils::remove_from_db(
+					transaction,
+					&*self.storage.db,
+					columns::KEY_LOOKUP,
+					columns::BODY_INDEX,
+					id,
+				)?;
+				match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
+					Ok(index) =>
+						for ex in index {
+							if let DbExtrinsic::Indexed { hash, .. } = ex {
+								transaction.release(columns::TRANSACTION, hash);
+							}
+						},
+					Err(err) =>
+						return Err(sp_blockchain::Error::Backend(format!(
+							"Error decoding body list: {}",
+							err
+						))),
+				}
 			}
 		}
 		Ok(())
@@ -1939,9 +2221,10 @@ impl<Block: BlockT> Backend<Block> {
 	fn empty_state(&self) -> RecordStatsState<RefTrackingState<Block>, Block> {
 		let root = EmptyStorage::<Block>::new().0; // Empty trie
-		let db_state = DbStateBuilder::<HashingFor<Block>>::new(self.storage.clone(), root)
-			.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache()))
-			.build();
+		let db_state =
+			DbStateBuilder::<HashingFor<Block>>::new(Box::new(self.storage.clone()), root)
+				.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache()))
+				.build();
 		let state = RefTrackingState::new(db_state, self.storage.clone(), None);
 		RecordStatsState::new(state, None, self.state_usage.clone())
 	}
@@ -2072,7 +2355,7 @@ impl<Block: BlockT> sc_client_api::backend::Backend<Block> for Backend<Block> {
 		Ok(BlockImportOperation {
 			pending_block: None,
 			old_state: self.empty_state(),
-			db_updates: PrefixedMemoryDB::default(),
+			db_updates: BackendTransaction::unchanged(Default::default()),
 			storage_updates: Default::default(),
 			child_storage_updates: Default::default(),
 			offchain_storage_updates: Default::default(),
@@ -2104,15 +2387,16 @@ impl<Block: BlockT> sc_client_api::backend::Backend<Block> for Backend<Block> {
 		self.state_usage.merge_sm(usage);
 		if let Err(e) = self.try_commit_operation(operation) {
-			let state_meta_db = StateMetaDb(self.storage.db.clone());
-			self.storage
-				.state_db
-				.reset(state_meta_db)
-				.map_err(sp_blockchain::Error::from_state_db)?;
+			if let Some(state_db) = &self.storage.state_db {
+				let state_meta_db = StateMetaDb(self.storage.db.clone());
+				state_db.reset(state_meta_db).map_err(sp_blockchain::Error::from_state_db)?;
+			}
 			self.blockchain.clear_pinning_cache();
 			Err(e)
 		} else {
-			self.storage.state_db.sync();
+			if let Some(state_db) = &self.storage.state_db {
+				state_db.sync();
+			}
 			Ok(())
 		}
 	}
@@ -2268,72 +2552,68 @@ impl<Block: BlockT> sc_client_api::backend::Backend<Block> for Backend<Block> {
 				return Ok(c.saturated_into::<NumberFor<Block>>())
 			}
-			match self.storage.state_db.revert_one() {
-				Some(commit) => {
-					apply_state_commit(&mut transaction, commit);
+			if let Some(state_db) = &self.storage.state_db {
+				match state_db.revert_one() {
+					Some(commit) => {
+						apply_state_commit(&mut transaction, commit);
+					},
+					None => return Ok(c.saturated_into::<NumberFor<Block>>()),
+				}
+			}
-					number_to_revert = prev_number;
-					hash_to_revert = prev_hash;
+			number_to_revert = prev_number;
+			hash_to_revert = prev_hash;
-					let update_finalized = number_to_revert < finalized;
+			let update_finalized = number_to_revert < finalized;
-					let key = utils::number_and_hash_to_lookup_key(
-						number_to_revert,
-						&hash_to_revert,
-					)?;
-					if update_finalized {
-						transaction.set_from_vec(
-							columns::META,
-							meta_keys::FINALIZED_BLOCK,
-							key.clone(),
-						);
+			let key = utils::number_and_hash_to_lookup_key(number_to_revert, &hash_to_revert)?;
+			if update_finalized {
+				transaction.set_from_vec(
columns::META, + meta_keys::FINALIZED_BLOCK, + key.clone(), + ); - reverted_finalized.insert(removed_hash); - if let Some((hash, _)) = self.blockchain.info().finalized_state { - if hash == hash_to_revert { - if !number_to_revert.is_zero() && - self.have_state_at( - prev_hash, - number_to_revert - One::one(), - ) { - let lookup_key = utils::number_and_hash_to_lookup_key( - number_to_revert - One::one(), - prev_hash, - )?; - transaction.set_from_vec( - columns::META, - meta_keys::FINALIZED_STATE, - lookup_key, - ); - } else { - transaction - .remove(columns::META, meta_keys::FINALIZED_STATE); - } - } + reverted_finalized.insert(removed_hash); + if let Some((hash, _)) = self.blockchain.info().finalized_state { + if hash == hash_to_revert { + if !number_to_revert.is_zero() && + self.have_state_at(prev_hash, number_to_revert - One::one()) + { + let lookup_key = utils::number_and_hash_to_lookup_key( + number_to_revert - One::one(), + prev_hash, + )?; + transaction.set_from_vec( + columns::META, + meta_keys::FINALIZED_STATE, + lookup_key, + ); + } else { + transaction.remove(columns::META, meta_keys::FINALIZED_STATE); } } - transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key); - transaction.remove(columns::KEY_LOOKUP, removed.hash().as_ref()); - children::remove_children( - &mut transaction, - columns::META, - meta_keys::CHILDREN_PREFIX, - hash_to_revert, - ); - self.storage.db.commit(transaction)?; + } + } + transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key); + transaction.remove(columns::KEY_LOOKUP, removed.hash().as_ref()); + children::remove_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + hash_to_revert, + ); + self.storage.db.commit(transaction)?; - let is_best = number_to_revert < best_number; + let is_best = number_to_revert < best_number; - self.blockchain.update_meta(MetaUpdate { - hash: hash_to_revert, - number: number_to_revert, - is_best, - is_finalized: update_finalized, - with_state: false, - }); - }, - None => return Ok(c.saturated_into::>()), - } + self.blockchain.update_meta(MetaUpdate { + hash: hash_to_revert, + number: number_to_revert, + is_best, + is_finalized: update_finalized, + with_state: false, + }); } Ok(n) @@ -2381,8 +2661,10 @@ impl sc_client_api::backend::Backend for Backend { } let mut transaction = Transaction::new(); - if let Some(commit) = self.storage.state_db.remove(&hash) { - apply_state_commit(&mut transaction, commit); + if let Some(state_db) = &self.storage.state_db { + if let Some(commit) = state_db.remove(&hash) { + apply_state_commit(&mut transaction, commit); + } } transaction.remove(columns::KEY_LOOKUP, hash.as_ref()); @@ -2432,7 +2714,7 @@ impl sc_client_api::backend::Backend for Backend { if let Some(genesis_state) = &*self.genesis_state.read() { let root = genesis_state.root; let db_state = - DbStateBuilder::>::new(genesis_state.clone(), root) + DbStateBuilder::>::new(Box::new(genesis_state.clone()), root) .with_optional_cache( self.shared_trie_cache.as_ref().map(|c| c.local_cache()), ) @@ -2445,22 +2727,19 @@ impl sc_client_api::backend::Backend for Backend { match self.blockchain.header_metadata(hash) { Ok(ref hdr) => { - let hint = || { - sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref()) - .unwrap_or(None) - .is_some() - }; + let hint = || self.storage.contains_root(&hdr.state_root); if let Ok(()) = - self.storage.state_db.pin(&hash, hdr.number.saturated_into::(), hint) - { + self.storage.state_db.as_ref().map_or(Ok(()), |db| { + db.pin(&hash, 
hdr.number.saturated_into::(), hint) + }) { let root = hdr.state_root; - let db_state = - DbStateBuilder::>::new(self.storage.clone(), root) - .with_optional_cache( - self.shared_trie_cache.as_ref().map(|c| c.local_cache()), - ) - .build(); + let db_state = DbStateBuilder::>::new( + Box::new(self.storage.clone()), + root, + ) + .with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache())) + .build(); let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash)); Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone())) } else { @@ -2475,29 +2754,19 @@ impl sc_client_api::backend::Backend for Backend { } fn have_state_at(&self, hash: Block::Hash, number: NumberFor) -> bool { - if self.is_archive { + if self.state_pruning.is_archive() { match self.blockchain.header_metadata(hash) { - Ok(header) => sp_state_machine::Storage::get( - self.storage.as_ref(), - &header.state_root, - (&[], None), - ) - .unwrap_or(None) - .is_some(), + Ok(header) => self.storage.contains_root(&header.state_root), _ => false, } } else { - match self.storage.state_db.is_pruned(&hash, number.saturated_into::()) { + match self.storage.state_db.as_ref().map_or(IsPruned::MaybePruned, |db| { + db.is_pruned(&hash, number.saturated_into::()) + }) { IsPruned::Pruned => false, IsPruned::NotPruned => true, IsPruned::MaybePruned => match self.blockchain.header_metadata(hash) { - Ok(header) => sp_state_machine::Storage::get( - self.storage.as_ref(), - &header.state_root, - (&[], None), - ) - .unwrap_or(None) - .is_some(), + Ok(header) => self.storage.contains_root(&header.state_root), _ => false, }, } @@ -2509,33 +2778,26 @@ impl sc_client_api::backend::Backend for Backend { } fn requires_full_sync(&self) -> bool { - matches!( - self.storage.state_db.pruning_mode(), - PruningMode::ArchiveAll | PruningMode::ArchiveCanonical - ) + self.state_pruning.is_archive() } fn pin_block(&self, hash: ::Hash) -> sp_blockchain::Result<()> { let hint = || { let header_metadata = self.blockchain.header_metadata(hash); header_metadata - .map(|hdr| { - sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref()) - .unwrap_or(None) - .is_some() - }) + .map(|hdr| self.storage.contains_root(&hdr.state_root)) .unwrap_or(false) }; if let Some(number) = self.blockchain.number(hash)? { - self.storage.state_db.pin(&hash, number.saturated_into::(), hint).map_err( - |_| { + if let Some(state_db) = &self.storage.state_db { + state_db.pin(&hash, number.saturated_into::(), hint).map_err(|_| { sp_blockchain::Error::UnknownBlock(format!( "Unable to pin: state already discarded for `{:?}`", hash )) - }, - )?; + })?; + } } else { return Err(ClientError::UnknownBlock(format!( "Can not pin block with hash `{:?}`. 
Block not found.", @@ -2551,7 +2813,9 @@ impl sc_client_api::backend::Backend for Backend { } fn unpin_block(&self, hash: ::Hash) { - self.storage.state_db.unpin(&hash); + if let Some(state_db) = &self.storage.state_db { + state_db.unpin(&hash); + } if self.blocks_pruning != BlocksPruning::KeepAll { self.blockchain.unpin(hash); @@ -2565,18 +2829,19 @@ impl sc_client_api::backend::LocalBackend for Backend::new_test(1, 0); + let data = b"hello"; + let key = blake2_256(data.as_slice()); + let hash = { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, Default::default()).unwrap(); @@ -2819,7 +3091,7 @@ pub(crate) mod tests { }; header.state_root = - op.old_state.storage_root(std::iter::empty(), state_version).0.into(); + op.old_state.storage_root(std::iter::empty(), state_version).root_hash().into(); let hash = header.hash(); op.reset_storage( @@ -2828,7 +3100,13 @@ pub(crate) mod tests { ) .unwrap(); - key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); + op.db_updates = sp_trie::Changeset::New(sp_trie::NewChangesetNode { + hash: key.into(), + prefix: Default::default(), + data: data.to_vec(), + children: Default::default(), + removed_keys: None, + }); op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) .unwrap(); @@ -2837,7 +3115,10 @@ pub(crate) mod tests { backend .storage .db - .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) + .get( + columns::STATE, + &sp_trie::prefixed_key::(&key.into(), EMPTY_PREFIX) + ) .unwrap(), &b"hello"[..] ); @@ -2859,13 +3140,21 @@ pub(crate) mod tests { header.state_root = op .old_state - .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version) - .0 + .storage_root( + storage.iter().cloned().map(|(x, y)| (x, Some(y), None)), + state_version, + ) + .root_hash() .into(); let hash = header.hash(); - op.db_updates.insert(EMPTY_PREFIX, b"hello"); - op.db_updates.remove(&key, EMPTY_PREFIX); + op.db_updates = sp_trie::Changeset::New(sp_trie::NewChangesetNode { + hash: key.into(), + prefix: Default::default(), + data: data.to_vec(), + children: Default::default(), + removed_keys: Some((None, vec![(key.into(), Default::default())])), + }); op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) .unwrap(); @@ -2874,7 +3163,10 @@ pub(crate) mod tests { backend .storage .db - .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) + .get( + columns::STATE, + &sp_trie::prefixed_key::(&key.into(), EMPTY_PREFIX) + ) .unwrap(), &b"hello"[..] 
); @@ -2896,12 +3188,22 @@ pub(crate) mod tests { header.state_root = op .old_state - .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version) - .0 + .storage_root( + storage.iter().cloned().map(|(x, y)| (x, Some(y), None)), + state_version, + ) + .root_hash() .into(); let hash = header.hash(); - op.db_updates.remove(&key, EMPTY_PREFIX); + op.db_updates = sp_trie::Changeset::New(sp_trie::NewChangesetNode { + hash: Default::default(), + prefix: Default::default(), + data: Default::default(), + children: Default::default(), + removed_keys: Some((None, vec![(key.into(), Default::default())])), + }); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) .unwrap(); @@ -2910,7 +3212,10 @@ pub(crate) mod tests { assert!(backend .storage .db - .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) + .get( + columns::STATE, + &sp_trie::prefixed_key::(&key.into(), EMPTY_PREFIX) + ) .is_some()); hash }; @@ -2930,8 +3235,11 @@ pub(crate) mod tests { header.state_root = op .old_state - .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version) - .0 + .storage_root( + storage.iter().cloned().map(|(x, y)| (x, Some(y), None)), + state_version, + ) + .root_hash() .into(); let hash = header.hash(); @@ -2957,8 +3265,11 @@ pub(crate) mod tests { header.state_root = op .old_state - .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version) - .0 + .storage_root( + storage.iter().cloned().map(|(x, y)| (x, Some(y), None)), + state_version, + ) + .root_hash() .into(); let hash = header.hash(); @@ -2969,7 +3280,10 @@ pub(crate) mod tests { assert!(backend .storage .db - .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) + .get( + columns::STATE, + &sp_trie::prefixed_key::(&key.into(), EMPTY_PREFIX) + ) .is_none()); hash }; @@ -2981,7 +3295,7 @@ pub(crate) mod tests { assert!(backend .storage .db - .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) + .get(columns::STATE, &sp_trie::prefixed_key::(&key.into(), EMPTY_PREFIX)) .is_none()); } @@ -3165,29 +3479,6 @@ pub(crate) mod tests { assert!(tree_route.retracted().is_empty()); } - #[test] - fn test_leaves_with_complex_block_tree() { - let backend: Arc> = - Arc::new(Backend::new_test(20, 20)); - substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend); - } - - #[test] - fn test_children_with_complex_block_tree() { - let backend: Arc> = - Arc::new(Backend::new_test(20, 20)); - substrate_test_runtime_client::trait_tests::test_children_for_backend(backend); - } - - #[test] - fn test_blockchain_query_by_number_gets_canonical() { - let backend: Arc> = - Arc::new(Backend::new_test(20, 20)); - substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical( - backend, - ); - } - #[test] fn test_leaves_pruned_on_finality() { let backend: Backend = Backend::new_test(10, 10); @@ -3217,8 +3508,7 @@ pub(crate) mod tests { #[test] fn test_aux() { - let backend: Backend = - Backend::new_test(0, 0); + let backend: Backend = Backend::new_test(0, 0); assert!(backend.get_aux(b"test").unwrap().is_none()); backend.insert_aux(&[(&b"test"[..], &b"hello"[..])], &[]).unwrap(); assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]); @@ -3318,8 +3608,11 @@ pub(crate) mod tests { header.state_root = op .old_state - .storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version) - .0 + .storage_root( + storage.iter().map(|(x, y)| (&x[..], Some(&y[..]), None)), + state_version, + ) + 
.root_hash() .into(); let hash = header.hash(); @@ -3354,11 +3647,12 @@ pub(crate) mod tests { let storage = vec![(b"test".to_vec(), Some(b"test2".to_vec()))]; - let (root, overlay) = op.old_state.storage_root( - storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))), + let commit = op.old_state.storage_root( + storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]), None)), state_version, ); - op.update_db_storage(overlay).unwrap(); + let root = commit.root_hash(); + op.update_db_storage(commit).unwrap(); header.state_root = root.into(); let hash = header.hash(); @@ -4031,11 +4325,12 @@ pub(crate) mod tests { let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))]; - let (root, overlay) = op.old_state.storage_root( - storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))), + let commit = op.old_state.storage_root( + storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]), None)), StateVersion::V1, ); - op.update_db_storage(overlay).unwrap(); + let root = commit.root_hash(); + op.update_db_storage(commit).unwrap(); header.state_root = root.into(); op.update_storage(storage, Vec::new()).unwrap(); @@ -4057,7 +4352,7 @@ pub(crate) mod tests { if matches!(pruning_mode, BlocksPruning::Some(_)) { assert_eq!( LastCanonicalized::Block(0), - backend.storage.state_db.last_canonicalized() + backend.storage.state_db.as_ref().unwrap().last_canonicalized() ); } @@ -4076,11 +4371,12 @@ pub(crate) mod tests { let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 2]))]; - let (root, overlay) = op.old_state.storage_root( - storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))), + let commit = op.old_state.storage_root( + storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]), None)), StateVersion::V1, ); - op.update_db_storage(overlay).unwrap(); + let root = commit.root_hash(); + op.update_db_storage(commit).unwrap(); header.state_root = root.into(); op.update_storage(storage, Vec::new()).unwrap(); @@ -4102,7 +4398,7 @@ pub(crate) mod tests { if matches!(pruning_mode, BlocksPruning::Some(_)) { assert_eq!( LastCanonicalized::Block(0), - backend.storage.state_db.last_canonicalized() + backend.storage.state_db.as_ref().unwrap().last_canonicalized() ); } @@ -4121,11 +4417,12 @@ pub(crate) mod tests { let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 3]))]; - let (root, overlay) = op.old_state.storage_root( - storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))), + let commit = op.old_state.storage_root( + storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]), None)), StateVersion::V1, ); - op.update_db_storage(overlay).unwrap(); + let root = commit.root_hash(); + op.update_db_storage(commit).unwrap(); header.state_root = root.into(); op.update_storage(storage, Vec::new()).unwrap(); @@ -4158,11 +4455,12 @@ pub(crate) mod tests { let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 4]))]; - let (root, overlay) = op.old_state.storage_root( - storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))), + let commit = op.old_state.storage_root( + storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]), None)), StateVersion::V1, ); - op.update_db_storage(overlay).unwrap(); + let root = commit.root_hash(); + op.update_db_storage(commit).unwrap(); header.state_root = root.into(); op.update_storage(storage, Vec::new()).unwrap(); @@ -4184,7 +4482,7 @@ pub(crate) mod tests { if matches!(pruning_mode, BlocksPruning::Some(_)) { assert_eq!( 
LastCanonicalized::Block(2), - backend.storage.state_db.last_canonicalized() + backend.storage.state_db.as_ref().unwrap().last_canonicalized() ); } @@ -4439,4 +4737,144 @@ pub(crate) mod tests { backend.unpin_block(fork_hash_3); assert!(bc.body(fork_hash_3).unwrap().is_none()); } + + fn generate_trie( + db: &mut StorageDb, + key_values: impl IntoIterator, Vec)>, + ) -> H256 { + db.insert_empty_trie_node(); + let mut trie_db = TrieDBMutBuilderV1::::new(db).build(); + for (key, value) in key_values { + trie_db.insert(&key, &value).expect("trie insertion failed"); + } + + let commit = trie_db.commit(); + let root = commit.root_hash(); + + let mut transaction = Transaction::default(); + apply_tree_commit::(commit, db.db.state_capabilities(), &mut transaction); + + db.db.commit(transaction).expect("Failed to write transaction"); + + root + } + + fn query_trie<'a>( + db: &StorageDb, + root: H256, + key_values: impl IntoIterator, Vec)>, + ) { + use sp_state_machine::AsDB; + use trie_db::Trie; + let db = db.as_node_db(); + let trie_db = TrieDBBuilderV1::::new(db, &root).build(); + for (key, value) in key_values { + let queried = trie_db.get(&key).expect("trie insertion failed"); + assert_eq!(queried.as_ref(), Some(value)); + } + } + + // -------- Copied from substrate/bin/node/bench/src/tempdb.rs -------- + // should be moved here or test here moved to node bench + // (kind of prefer here due to compilation time) + #[derive(Clone, Copy, Debug)] + pub enum DatabaseType { + #[cfg(feature = "rocksdb")] + RocksDb, + ParityDb, + ParityDbMulti, + } + + pub struct TempDatabase(tempfile::TempDir); + + impl TempDatabase { + pub fn new() -> Self { + let dir = tempfile::tempdir().expect("temp dir creation failed"); + log::trace!( + target: "bench-logistics", + "Created temp db at {}", + dir.path().to_string_lossy(), + ); + + TempDatabase(dir) + } + + pub fn open(&mut self, db_type: DatabaseType) -> StorageDb { + match db_type { + #[cfg(feature = "rocksdb")] + DatabaseType::RocksDb => { + let db = open_database::( + &DatabaseSource::RocksDb { + path: self.0.path().into(), + cache_size: 128 * 1024 * 1024, + }, + true, + false, + ) + .expect("Database backend error"); + StorageDb:: { db, state_db: None } + }, + DatabaseType::ParityDbMulti | DatabaseType::ParityDb => { + let db = open_database::( + &DatabaseSource::ParityDb { + path: self.0.path().into(), + multi_tree: matches!(db_type, DatabaseType::ParityDbMulti), + }, + true, + false, + ) + .expect("Database backend error"); + StorageDb:: { db, state_db: None } + }, + } + } + } + + impl Clone for TempDatabase { + fn clone(&self) -> Self { + let new_dir = tempfile::tempdir().expect("temp dir creation failed"); + let self_dir = self.0.path(); + + log::trace!( + target: "bench-logistics", + "Cloning db ({}) to {}", + self_dir.to_string_lossy(), + new_dir.path().to_string_lossy(), + ); + let self_db_files = std::fs::read_dir(self_dir) + .expect("failed to list file in seed dir") + .map(|f_result| f_result.expect("failed to read file in seed db").path()) + .collect::>(); + fs_extra::copy_items( + &self_db_files, + new_dir.path(), + &fs_extra::dir::CopyOptions::new(), + ) + .expect("Copy of seed database is ok"); + + TempDatabase(new_dir) + } + } + // -------- End Copied from substrate/bin/node/bench/src/tempdb.rs -------- + + #[test] + fn check_state_on_db() { + let key_values = [ + (b"key1".to_vec(), b"value1".to_vec()), + // (b"key2".to_vec(), b"value2".to_vec()), + ]; + + let db_kind = [ + DatabaseType::ParityDbMulti, + DatabaseType::ParityDb, + #[cfg(feature = 
"rocksdb")] + DatabaseType::RocksDb, + ]; + for kind in db_kind { + let mut temp = TempDatabase::new(); + let mut database = temp.open(kind); + let root = generate_trie(&mut database, key_values.iter().cloned()); + query_trie(&database, root, &key_values); + } + } } diff --git a/substrate/client/db/src/parity_db.rs b/substrate/client/db/src/parity_db.rs index b7068f2430ef..e7e1d5573a98 100644 --- a/substrate/client/db/src/parity_db.rs +++ b/substrate/client/db/src/parity_db.rs @@ -15,14 +15,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{ - columns, - utils::{DatabaseType, NUM_COLUMNS}, -}; +use crate::{columns, utils::NUM_COLUMNS}; +use parity_db::Operation; /// A `Database` adapter for parity-db. -use sp_database::{error::DatabaseError, Change, ColumnId, Database, Transaction}; +use sp_database::{ + error::DatabaseError, Change, ColumnId, DBLocation, Database, StateCapabilities, Transaction, +}; -struct DbAdapter(parity_db::Db); +struct DbAdapter(parity_db::Db, bool); fn handle_err(result: parity_db::Result) -> T { match result { @@ -36,40 +36,44 @@ fn handle_err(result: parity_db::Result) -> T { /// Wrap parity-db database into a trait object that implements `sp_database::Database` pub fn open>( path: &std::path::Path, - db_type: DatabaseType, create: bool, upgrade: bool, + archive: bool, + multi_tree: bool, ) -> parity_db::Result>> { let mut config = parity_db::Options::with_columns(path, NUM_COLUMNS as u8); - match db_type { - DatabaseType::Full => { - let compressed = [ - columns::STATE, - columns::HEADER, - columns::BODY, - columns::BODY_INDEX, - columns::TRANSACTION, - columns::JUSTIFICATIONS, - ]; - - for i in compressed { - let column = &mut config.columns[i as usize]; - column.compression = parity_db::CompressionType::Lz4; - } + let compressed = [ + columns::STATE, + columns::HEADER, + columns::BODY, + columns::BODY_INDEX, + columns::TRANSACTION, + columns::JUSTIFICATIONS, + ]; + + for i in compressed { + let column = &mut config.columns[i as usize]; + column.compression = parity_db::CompressionType::Lz4; + } - let state_col = &mut config.columns[columns::STATE as usize]; - state_col.ref_counted = true; - state_col.preimage = true; - state_col.uniform = true; + let state_col = &mut config.columns[columns::STATE as usize]; + state_col.preimage = true; + state_col.uniform = true; + state_col.append_only = archive & multi_tree; + state_col.ref_counted = true & !state_col.append_only; - let tx_col = &mut config.columns[columns::TRANSACTION as usize]; - tx_col.ref_counted = true; - tx_col.preimage = true; - tx_col.uniform = true; - }, + if multi_tree { + state_col.multitree = true; + state_col.allow_direct_node_access = true; + state_col.compression = parity_db::CompressionType::NoCompression; } + let tx_col = &mut config.columns[columns::TRANSACTION as usize]; + tx_col.ref_counted = true; + tx_col.preimage = true; + tx_col.uniform = true; + if upgrade { log::info!("Upgrading database metadata."); if let Some(meta) = parity_db::Options::load_metadata(path)? { @@ -83,7 +87,7 @@ pub fn open>( parity_db::Db::open(&config)? 
}; - Ok(std::sync::Arc::new(DbAdapter(db))) + Ok(std::sync::Arc::new(DbAdapter(db, multi_tree))) } fn ref_counted_column(col: u32) -> bool { @@ -93,40 +97,43 @@ fn ref_counted_column(col: u32) -> bool { impl> Database for DbAdapter { fn commit(&self, transaction: Transaction) -> Result<(), DatabaseError> { let mut not_ref_counted_column = Vec::new(); - let result = self.0.commit(transaction.0.into_iter().filter_map(|change| { + let result = self.0.commit_changes(transaction.0.into_iter().filter_map(|change| { Some(match change { - Change::Set(col, key, value) => (col as u8, key, Some(value)), - Change::Remove(col, key) => (col as u8, key, None), + Change::Set(col, key, value) => (col as u8, Operation::Set(key, value)), + Change::Remove(col, key) => (col as u8, Operation::Dereference(key)), Change::Store(col, key, value) => if ref_counted_column(col) { - (col as u8, key.as_ref().to_vec(), Some(value)) + (col as u8, Operation::Set(key.as_ref().to_vec(), value)) } else { if !not_ref_counted_column.contains(&col) { not_ref_counted_column.push(col); } return None }, - Change::Reference(col, key) => { + Change::Reference(col, key) => if ref_counted_column(col) { - // FIXME accessing value is not strictly needed, optimize this in parity-db. - let value = >::get(self, col, key.as_ref()); - (col as u8, key.as_ref().to_vec(), value) + (col as u8, Operation::Reference(key.as_ref().to_vec())) } else { if !not_ref_counted_column.contains(&col) { not_ref_counted_column.push(col); } return None - } - }, + }, + Change::ReferenceTree(col, key) => + (col as u8, Operation::ReferenceTree(key.as_ref().to_vec())), Change::Release(col, key) => if ref_counted_column(col) { - (col as u8, key.as_ref().to_vec(), None) + (col as u8, Operation::Dereference(key.as_ref().to_vec())) } else { if !not_ref_counted_column.contains(&col) { not_ref_counted_column.push(col); } return None }, + Change::ReleaseTree(col, key) => + (col as u8, Operation::DereferenceTree(key.as_ref().to_vec())), + Change::StoreTree(col, key, tree) => + (col as u8, Operation::InsertTree(key.as_ref().to_vec(), tree)), }) })); @@ -152,11 +159,28 @@ impl> Database for DbAdapter { handle_err(self.0.get_size(col as u8, key)).map(|s| s as usize) } - fn supports_ref_counting(&self) -> bool { - true + fn get_node( + &self, + col: ColumnId, + key: &[u8], + location: DBLocation, + ) -> Option<(Vec, Vec)> { + if self.1 && col == columns::STATE { + if location == 0 { + handle_err(self.0.get_root(col as u8, key)) + } else { + handle_err(self.0.get_node(col as u8, location)) + } + } else { + handle_err(self.0.get(col as u8, key)).map(|v| (v, Default::default())) + } } - fn sanitize_key(&self, key: &mut Vec) { - let _prefix = key.drain(0..key.len() - crate::DB_HASH_LEN); + fn state_capabilities(&self) -> StateCapabilities { + if self.1 { + StateCapabilities::TreeColumn + } else { + StateCapabilities::RefCounted + } } } diff --git a/substrate/client/db/src/record_stats_state.rs b/substrate/client/db/src/record_stats_state.rs index d9a35c075d79..c19285c1a508 100644 --- a/substrate/client/db/src/record_stats_state.rs +++ b/substrate/client/db/src/record_stats_state.rs @@ -28,7 +28,7 @@ use sp_state_machine::{ backend::{AsTrieBackend, Backend as StateBackend}, BackendTransaction, IterArgs, StorageIterator, StorageKey, StorageValue, TrieBackend, }; -use sp_trie::MerkleValue; +use sp_trie::{ChildChangeset, MerkleValue}; use std::sync::Arc; /// State abstraction for recording stats about state access. 
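// ---- Editorial note (not part of the diff) -----------------------------------
// The `record_stats_state.rs` hunks below change `storage_root` from returning a
// `(root, transaction)` tuple to returning the commit changeset itself, with the
// root read via `root_hash()`, and each delta entry gains an optional
// `ChildChangeset`. A minimal sketch of the new call shape, mirroring the test
// code elsewhere in this diff (`op`, `storage` and `state_version` are names
// taken from those tests, not a definitive API reference):
//
//     let commit = op.old_state.storage_root(
//         storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]), None)),
//         state_version,
//     );
//     let root = commit.root_hash();
//     op.update_db_storage(commit).unwrap();
// -------------------------------------------------------------------------------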
@@ -110,7 +110,6 @@ impl>, B: BlockT> StateBackend> for RecordStatsState { type Error = S::Error; - type TrieBackendStorage = S::TrieBackendStorage; type RawIter = RawIter; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -186,9 +185,9 @@ impl>, B: BlockT> StateBackend> fn storage_root<'a>( &self, - delta: impl Iterator)>, + delta: impl Iterator, Option>)>, state_version: StateVersion, - ) -> (B::Hash, BackendTransaction>) { + ) -> BackendTransaction { self.state.storage_root(delta, state_version) } @@ -197,7 +196,7 @@ impl>, B: BlockT> StateBackend> child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (B::Hash, bool, BackendTransaction>) { + ) -> (BackendTransaction, bool) { self.state.child_storage_root(child_info, delta, state_version) } @@ -219,9 +218,10 @@ impl>, B: BlockT> StateBackend> impl> + AsTrieBackend>, B: BlockT> AsTrieBackend> for RecordStatsState { - type TrieBackendStorage = >>::TrieBackendStorage; - - fn as_trie_backend(&self) -> &TrieBackend> { + fn as_trie_backend(&self) -> &TrieBackend> { self.state.as_trie_backend() } + fn as_trie_backend_mut(&mut self) -> &mut TrieBackend> { + self.state.as_trie_backend_mut() + } } diff --git a/substrate/client/db/src/upgrade.rs b/substrate/client/db/src/upgrade.rs index f1e503867dfc..9749d55e31d8 100644 --- a/substrate/client/db/src/upgrade.rs +++ b/substrate/client/db/src/upgrade.rs @@ -24,7 +24,7 @@ use std::{ path::{Path, PathBuf}, }; -use crate::{columns, utils::DatabaseType}; +use crate::columns; use codec::{Decode, Encode}; use kvdb_rocksdb::{Database, DatabaseConfig}; use sp_runtime::traits::Block as BlockT; @@ -87,21 +87,21 @@ impl fmt::Display for UpgradeError { } /// Upgrade database to current version. -pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> UpgradeResult<()> { +pub fn upgrade_db(db_path: &Path) -> UpgradeResult<()> { let db_version = current_version(db_path)?; match db_version { 0 => return Err(UpgradeError::UnsupportedVersion(db_version)), 1 => { - migrate_1_to_2::(db_path, db_type)?; - migrate_2_to_3::(db_path, db_type)?; - migrate_3_to_4::(db_path, db_type)?; + migrate_1_to_2::(db_path)?; + migrate_2_to_3::(db_path)?; + migrate_3_to_4::(db_path)?; }, 2 => { - migrate_2_to_3::(db_path, db_type)?; - migrate_3_to_4::(db_path, db_type)?; + migrate_2_to_3::(db_path)?; + migrate_3_to_4::(db_path)?; }, 3 => { - migrate_3_to_4::(db_path, db_type)?; + migrate_3_to_4::(db_path)?; }, CURRENT_VERSION => (), _ => return Err(UpgradeError::FutureDatabaseVersion(db_version)), @@ -113,7 +113,7 @@ pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> Upgra /// Migration from version1 to version2: /// 1) the number of columns has changed from 11 to 12; /// 2) transactions column is added; -fn migrate_1_to_2(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> { +fn migrate_1_to_2(db_path: &Path) -> UpgradeResult<()> { let db_cfg = DatabaseConfig::with_columns(V1_NUM_COLUMNS); let mut db = Database::open(&db_cfg, db_path)?; db.add_column().map_err(Into::into) @@ -121,7 +121,7 @@ fn migrate_1_to_2(db_path: &Path, _db_type: DatabaseType) -> Upgr /// Migration from version2 to version3: /// - The format of the stored Justification changed to support multiple Justifications. 
-fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> { +fn migrate_2_to_3(db_path: &Path) -> UpgradeResult<()> { let db_cfg = DatabaseConfig::with_columns(V2_NUM_COLUMNS); let db = Database::open(&db_cfg, db_path)?; @@ -153,7 +153,7 @@ fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> Upgr /// Migration from version3 to version4: /// 1) the number of columns has changed from 12 to 13; /// 2) BODY_INDEX column is added; -fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> { +fn migrate_3_to_4(db_path: &Path) -> UpgradeResult<()> { let db_cfg = DatabaseConfig::with_columns(V3_NUM_COLUMNS); let mut db = Database::open(&db_cfg, db_path)?; db.add_column().map_err(Into::into) @@ -194,6 +194,7 @@ fn version_file_path(path: &Path) -> PathBuf { mod tests { use super::*; use crate::{tests::Block, DatabaseSource}; + const FULL_DB_DIR: &str = "full"; fn create_db(db_path: &Path, version: Option) { if let Some(version) = version { @@ -203,11 +204,11 @@ mod tests { } } - fn open_database(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { + fn open_database(db_path: &Path) -> sp_blockchain::Result<()> { crate::utils::open_database::( &DatabaseSource::RocksDb { path: db_path.to_owned(), cache_size: 128 }, - db_type, true, + false, ) .map(|_| ()) .map_err(|e| sp_blockchain::Error::Backend(e.to_string())) @@ -217,39 +218,36 @@ mod tests { fn downgrade_never_happens() { let db_dir = tempfile::TempDir::new().unwrap(); create_db(db_dir.path(), Some(CURRENT_VERSION + 1)); - assert!(open_database(db_dir.path(), DatabaseType::Full).is_err()); + assert!(open_database(db_dir.path()).is_err()); } #[test] fn open_empty_database_works() { - let db_type = DatabaseType::Full; let db_dir = tempfile::TempDir::new().unwrap(); - let db_dir = db_dir.path().join(db_type.as_str()); - open_database(&db_dir, db_type).unwrap(); - open_database(&db_dir, db_type).unwrap(); + let db_dir = db_dir.path().join(FULL_DB_DIR); + open_database(&db_dir).unwrap(); + open_database(&db_dir).unwrap(); assert_eq!(current_version(&db_dir).unwrap(), CURRENT_VERSION); } #[test] fn upgrade_to_3_works() { - let db_type = DatabaseType::Full; for version_from_file in &[None, Some(1), Some(2)] { let db_dir = tempfile::TempDir::new().unwrap(); - let db_path = db_dir.path().join(db_type.as_str()); + let db_path = db_dir.path().join(FULL_DB_DIR); create_db(&db_path, *version_from_file); - open_database(&db_path, db_type).unwrap(); + open_database(&db_path).unwrap(); assert_eq!(current_version(&db_path).unwrap(), CURRENT_VERSION); } } #[test] fn upgrade_to_4_works() { - let db_type = DatabaseType::Full; for version_from_file in &[None, Some(1), Some(2), Some(3)] { let db_dir = tempfile::TempDir::new().unwrap(); - let db_path = db_dir.path().join(db_type.as_str()); + let db_path = db_dir.path().join(FULL_DB_DIR); create_db(&db_path, *version_from_file); - open_database(&db_path, db_type).unwrap(); + open_database(&db_path).unwrap(); assert_eq!(current_version(&db_path).unwrap(), CURRENT_VERSION); } } diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs index abf9c4629cee..5be417a687f5 100644 --- a/substrate/client/db/src/utils.rs +++ b/substrate/client/db/src/utils.rs @@ -19,9 +19,9 @@ //! Db-based backend utility structures and functions, used by both //! full and light storages. 
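// ---- Editorial note (not part of the diff) -----------------------------------
// In the `utils.rs` hunks below, `open_database` drops the `DatabaseType`
// parameter and instead threads through an `archive` flag, while
// `DatabaseSource::ParityDb` gains a `multi_tree` field. A sketch of the new
// call shape, assuming the same test fixtures used at the bottom of this file
// (`Block`, `paritydb_path`); error handling is elided:
//
//     let source = DatabaseSource::ParityDb { path: paritydb_path, multi_tree: false };
//     let db = open_database::<Block>(&source, /* create */ true, /* archive */ false)?;
// -------------------------------------------------------------------------------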
-use std::{fmt, fs, io, path::Path, sync::Arc}; +use std::{fmt, io, path::Path, sync::Arc}; -use log::{debug, info}; +use log::debug; use crate::{Database, DatabaseSource, DbHash}; use codec::Decode; @@ -40,8 +40,6 @@ pub const COLUMN_META: u32 = 0; /// Keys of entries in COLUMN_META. pub mod meta_keys { - /// Type of storage (full or light). - pub const TYPE: &[u8; 4] = b"type"; /// Best block key. pub const BEST_BLOCK: &[u8; 4] = b"best"; /// Last finalized block key. @@ -80,13 +78,6 @@ pub struct Meta { /// A block lookup key: used for canonical lookup from block number to hash pub type NumberIndexKey = [u8; 4]; -/// Database type. -#[derive(Clone, Copy, Debug, PartialEq)] -pub enum DatabaseType { - /// Full node database. - Full, -} - /// Convert block number into short lookup key (LE representation) for /// blocks that are in the canonical chain. /// @@ -174,27 +165,23 @@ where /// Opens the configured database. pub fn open_database( db_source: &DatabaseSource, - db_type: DatabaseType, create: bool, + archive: bool, ) -> OpenDbResult { - // Maybe migrate (copy) the database to a type specific subdirectory to make it - // possible that light and full databases coexist - // NOTE: This function can be removed in a few releases - maybe_migrate_to_type_subdir::(db_source, db_type)?; - - open_database_at::(db_source, db_type, create) + open_database_at::(db_source, create, archive) } fn open_database_at( db_source: &DatabaseSource, - db_type: DatabaseType, create: bool, + archive: bool, ) -> OpenDbResult { let db: Arc> = match &db_source { - DatabaseSource::ParityDb { path } => open_parity_db::(path, db_type, create)?, + DatabaseSource::ParityDb { path, multi_tree } => + open_parity_db::(path, create, archive, *multi_tree)?, #[cfg(feature = "rocksdb")] DatabaseSource::RocksDb { path, cache_size } => - open_kvdb_rocksdb::(path, db_type, create, *cache_size)?, + open_kvdb_rocksdb::(path, create, *cache_size)?, DatabaseSource::Custom { db, require_create_flag } => { if *require_create_flag && !create { return Err(OpenDbError::DoesNotExist) @@ -203,16 +190,15 @@ fn open_database_at( }, DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size } => { // check if rocksdb exists first, if not, open paritydb - match open_kvdb_rocksdb::(rocksdb_path, db_type, false, *cache_size) { + match open_kvdb_rocksdb::(rocksdb_path, false, *cache_size) { Ok(db) => db, Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) => - open_parity_db::(paritydb_path, db_type, create)?, + open_parity_db::(paritydb_path, create, archive, false)?, Err(as_is) => return Err(as_is), } }, }; - check_database_type(&*db, db_type)?; Ok(db) } @@ -224,10 +210,6 @@ pub enum OpenDbError { DoesNotExist, Internal(String), DatabaseError(sp_database::error::DatabaseError), - UnexpectedDbType { - expected: DatabaseType, - found: Vec, - }, } type OpenDbResult = Result>, OpenDbError>; @@ -243,14 +225,6 @@ impl fmt::Display for OpenDbError { OpenDbError::DatabaseError(db_error) => { write!(f, "Database Error: {}", db_error) }, - OpenDbError::UnexpectedDbType { expected, found } => { - write!( - f, - "Unexpected DB-Type. 
Expected: {:?}, Found: {:?}", - expected.as_str().as_bytes(), - found - ) - }, } } } @@ -281,27 +255,27 @@ impl From for OpenDbError { } } -fn open_parity_db(path: &Path, db_type: DatabaseType, create: bool) -> OpenDbResult { - match crate::parity_db::open(path, db_type, create, false) { +fn open_parity_db( + path: &Path, + create: bool, + archive: bool, + multi_tree: bool, +) -> OpenDbResult { + match crate::parity_db::open(path, create, false, archive, multi_tree) { Ok(db) => Ok(db), Err(parity_db::Error::InvalidConfiguration(_)) => { log::warn!("Invalid parity db configuration, attempting database metadata update."); // Try to update the database with the new config - Ok(crate::parity_db::open(path, db_type, create, true)?) + Ok(crate::parity_db::open(path, create, true, archive, multi_tree)?) }, Err(e) => Err(e.into()), } } #[cfg(any(feature = "rocksdb", test))] -fn open_kvdb_rocksdb( - path: &Path, - db_type: DatabaseType, - create: bool, - cache_size: usize, -) -> OpenDbResult { +fn open_kvdb_rocksdb(path: &Path, create: bool, cache_size: usize) -> OpenDbResult { // first upgrade database to required version - match crate::upgrade::upgrade_db::(path, db_type) { + match crate::upgrade::upgrade_db::(path) { // in case of missing version file, assume that database simply does not exist at given // location Ok(_) | Err(crate::upgrade::UpgradeError::MissingDatabaseVersionFile) => (), @@ -313,28 +287,25 @@ fn open_kvdb_rocksdb( db_config.create_if_missing = create; let mut memory_budget = std::collections::HashMap::new(); - match db_type { - DatabaseType::Full => { - let state_col_budget = (cache_size as f64 * 0.9) as usize; - let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); - - for i in 0..NUM_COLUMNS { - if i == crate::columns::STATE { - memory_budget.insert(i, state_col_budget); - } else { - memory_budget.insert(i, other_col_budget); - } - } - log::trace!( - target: "db", - "Open RocksDB database at {:?}, state column budget: {} MiB, others({}) column cache: {} MiB", - path, - state_col_budget, - NUM_COLUMNS, - other_col_budget, - ); - }, + let state_col_budget = (cache_size as f64 * 0.9) as usize; + let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); + + for i in 0..NUM_COLUMNS { + if i == crate::columns::STATE { + memory_budget.insert(i, state_col_budget); + } else { + memory_budget.insert(i, other_col_budget); + } } + log::trace!( + target: "db", + "Open RocksDB database at {:?}, state column budget: {} MiB, others({}) column cache: {} MiB", + path, + state_col_budget, + NUM_COLUMNS, + other_col_budget, + ); + db_config.memory_budget = memory_budget; let db = kvdb_rocksdb::Database::open(&db_config, path)?; @@ -346,75 +317,12 @@ fn open_kvdb_rocksdb( #[cfg(not(any(feature = "rocksdb", test)))] fn open_kvdb_rocksdb( _path: &Path, - _db_type: DatabaseType, _create: bool, _cache_size: usize, ) -> OpenDbResult { Err(OpenDbError::NotEnabled("with-kvdb-rocksdb")) } -/// Check database type. 
-pub fn check_database_type( - db: &dyn Database, - db_type: DatabaseType, -) -> Result<(), OpenDbError> { - match db.get(COLUMN_META, meta_keys::TYPE) { - Some(stored_type) => - if db_type.as_str().as_bytes() != &*stored_type { - return Err(OpenDbError::UnexpectedDbType { - expected: db_type, - found: stored_type.to_owned(), - }) - }, - None => { - let mut transaction = Transaction::new(); - transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes()); - db.commit(transaction).map_err(OpenDbError::DatabaseError)?; - }, - } - - Ok(()) -} - -fn maybe_migrate_to_type_subdir( - source: &DatabaseSource, - db_type: DatabaseType, -) -> Result<(), OpenDbError> { - if let Some(p) = source.path() { - let mut basedir = p.to_path_buf(); - basedir.pop(); - - // Do we have to migrate to a database-type-based subdirectory layout: - // See if there's a file identifying a rocksdb or paritydb folder in the parent dir and - // the target path ends in a role specific directory - if (basedir.join("db_version").exists() || basedir.join("metadata").exists()) && - (p.ends_with(DatabaseType::Full.as_str())) - { - // Try to open the database to check if the current `DatabaseType` matches the type of - // database stored in the target directory and close the database on success. - let mut old_source = source.clone(); - old_source.set_path(&basedir); - open_database_at::(&old_source, db_type, false)?; - - info!( - "Migrating database to a database-type-based subdirectory: '{:?}' -> '{:?}'", - basedir, - basedir.join(db_type.as_str()) - ); - - let mut tmp_dir = basedir.clone(); - tmp_dir.pop(); - tmp_dir.push("tmp"); - - fs::rename(&basedir, &tmp_dir)?; - fs::create_dir_all(&p)?; - fs::rename(tmp_dir, &p)?; - } - } - - Ok(()) -} - /// Read database column entry for the given block. pub fn read_db( db: &dyn Database, @@ -545,15 +453,6 @@ pub fn read_genesis_hash( } } -impl DatabaseType { - /// Returns str representation of the type. 
- pub fn as_str(&self) -> &'static str { - match *self { - DatabaseType::Full => "full", - } - } -} - pub(crate) struct JoinInput<'a, 'b>(&'a [u8], &'b [u8]); pub(crate) fn join_input<'a, 'b>(i1: &'a [u8], i2: &'b [u8]) -> JoinInput<'a, 'b> { @@ -585,71 +484,6 @@ mod tests { use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; type Block = RawBlock>; - #[cfg(feature = "rocksdb")] - #[test] - fn database_type_subdir_migration() { - use std::path::PathBuf; - type Block = RawBlock>; - - fn check_dir_for_db_type( - db_type: DatabaseType, - mut source: DatabaseSource, - db_check_file: &str, - ) { - let base_path = tempfile::TempDir::new().unwrap(); - let old_db_path = base_path.path().join("chains/dev/db"); - - source.set_path(&old_db_path); - - { - let db_res = open_database::(&source, db_type, true); - assert!(db_res.is_ok(), "New database should be created."); - assert!(old_db_path.join(db_check_file).exists()); - assert!(!old_db_path.join(db_type.as_str()).join("db_version").exists()); - } - - source.set_path(&old_db_path.join(db_type.as_str())); - - let db_res = open_database::(&source, db_type, true); - assert!(db_res.is_ok(), "Reopening the db with the same role should work"); - // check if the database dir had been migrated - assert!(!old_db_path.join(db_check_file).exists()); - assert!(old_db_path.join(db_type.as_str()).join(db_check_file).exists()); - } - - check_dir_for_db_type( - DatabaseType::Full, - DatabaseSource::RocksDb { path: PathBuf::new(), cache_size: 128 }, - "db_version", - ); - - check_dir_for_db_type( - DatabaseType::Full, - DatabaseSource::ParityDb { path: PathBuf::new() }, - "metadata", - ); - - // check failure on reopening with wrong role - { - let base_path = tempfile::TempDir::new().unwrap(); - let old_db_path = base_path.path().join("chains/dev/db"); - - let source = DatabaseSource::RocksDb { path: old_db_path.clone(), cache_size: 128 }; - { - let db_res = open_database::(&source, DatabaseType::Full, true); - assert!(db_res.is_ok(), "New database should be created."); - - // check if the database dir had been migrated - assert!(old_db_path.join("db_version").exists()); - assert!(!old_db_path.join("light/db_version").exists()); - assert!(!old_db_path.join("full/db_version").exists()); - } - // assert nothing was changed - assert!(old_db_path.join("db_version").exists()); - assert!(!old_db_path.join("full/db_version").exists()); - } - } - #[test] fn number_index_key_doesnt_panic() { let id = BlockId::::Number(72340207214430721); @@ -659,11 +493,6 @@ mod tests { }; } - #[test] - fn database_type_as_str_works() { - assert_eq!(DatabaseType::Full.as_str(), "full"); - } - #[test] fn join_input_works() { let buf1 = [1, 2, 3, 4]; @@ -700,13 +529,13 @@ mod tests { // it should create new auto (paritydb) database { - let db_res = open_database::(&source, DatabaseType::Full, true); + let db_res = open_database::(&source, true, false); assert!(db_res.is_ok(), "New database should be created."); } // it should reopen existing auto (pairtydb) database { - let db_res = open_database::(&source, DatabaseType::Full, true); + let db_res = open_database::(&source, true, false); assert!(db_res.is_ok(), "Existing parity database should be reopened"); } @@ -714,8 +543,8 @@ mod tests { { let db_res = open_database::( &DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 }, - DatabaseType::Full, true, + false, ); assert!(db_res.is_ok(), "New database should be opened."); } @@ -723,9 +552,9 @@ mod tests { // it should reopen existing auto (pairtydb) database { let 
db_res = open_database::<Block>(
-				&DatabaseSource::ParityDb { path: paritydb_path },
-				DatabaseType::Full,
+				&DatabaseSource::ParityDb { path: paritydb_path, multi_tree: false },
 				true,
+				false,
 			);
 			assert!(db_res.is_ok(), "Existing parity database should be reopened");
 		}
@@ -743,7 +572,7 @@
 		// it should create new rocksdb database
 		{
-			let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
+			let db_res = open_database::<Block>(&source, true, false);
 			assert!(db_res.is_ok(), "New rocksdb database should be created");
 		}
@@ -755,8 +584,8 @@
 					rocksdb_path: rocksdb_path.clone(),
 					cache_size: 128,
 				},
-				DatabaseType::Full,
 				true,
+				false,
 			);
 			assert!(db_res.is_ok(), "Existing rocksdb database should be reopened");
 		}
@@ -764,9 +593,9 @@
 		{
 			let db_res = open_database::<Block>(
-				&DatabaseSource::ParityDb { path: paritydb_path },
-				DatabaseType::Full,
+				&DatabaseSource::ParityDb { path: paritydb_path, multi_tree: false },
 				true,
+				false,
 			);
 			assert!(db_res.is_ok(), "New paritydb database should be created");
 		}
@@ -775,8 +604,8 @@
 		{
 			let db_res = open_database::<Block>(
 				&DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 },
-				DatabaseType::Full,
 				true,
+				false,
 			);
 			assert!(db_res.is_ok(), "Existing rocksdb database should be reopened");
 		}
@@ -790,17 +619,18 @@
 		let paritydb_path = db_path.join("paritydb");
 		let rocksdb_path = db_path.join("rocksdb_path");
-		let source = DatabaseSource::ParityDb { path: paritydb_path.clone() };
+		// Note: multi-tree DBs do not support `Auto` at this point.
+		let source = DatabaseSource::ParityDb { path: paritydb_path.clone(), multi_tree: false };
 		// it should create new paritydb database
 		{
-			let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
+			let db_res = open_database::<Block>(&source, true, false);
 			assert!(db_res.is_ok(), "New database should be created.");
 		}
 		// it should reopen existing pairtydb database
 		{
-			let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
+			let db_res = open_database::<Block>(&source, true, false);
 			assert!(db_res.is_ok(), "Existing parity database should be reopened");
 		}
@@ -808,8 +638,8 @@
 		{
 			let db_res = open_database::<Block>(
 				&DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 },
-				DatabaseType::Full,
 				true,
+				false,
 			);
 			assert!(db_res.is_ok(), "New rocksdb database should be created");
 		}
@@ -818,8 +648,8 @@
 		{
 			let db_res = open_database::<Block>(
 				&DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size: 128 },
-				DatabaseType::Full,
 				true,
+				false,
 			);
 			assert!(db_res.is_ok(), "Existing parity database should be reopened");
 		}
diff --git a/substrate/client/executor/src/integration_tests/mod.rs b/substrate/client/executor/src/integration_tests/mod.rs
index 7f91b3ffe764..0b6b93b5e0d0 100644
--- a/substrate/client/executor/src/integration_tests/mod.rs
+++ b/substrate/client/executor/src/integration_tests/mod.rs
@@ -352,7 +352,9 @@ fn ordered_trie_root_should_work(wasm_method: WasmExecutionMethod) {
 	let trie_input = vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()];
 	assert_eq!(
 		call_in_wasm("test_ordered_trie_root", &[0], wasm_method, &mut ext.ext(),).unwrap(),
-		Layout::<BlakeTwo256>::ordered_trie_root(trie_input.iter()).as_bytes().encode(),
+		Layout::<BlakeTwo256>::ordered_trie_root(trie_input.iter())
+			.as_bytes()
+			.encode(),
 	);
 }
diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs
index 77c26266aac4..98bed5aaf0af 100644
--- a/substrate/client/network/src/discovery.rs
+++
b/substrate/client/network/src/discovery.rs @@ -72,7 +72,7 @@ use libp2p::{ }, PeerId, }; -use log::{debug, info, trace, warn}; +use log::{debug, trace, warn}; use sp_core::hexdisplay::HexDisplay; use std::{ cmp, @@ -646,7 +646,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { // NOTE: we might re-discover the same address multiple times // in which case we just want to refrain from logging. if self.known_external_addresses.insert(new_addr.clone()) { - info!( + debug!( target: "sub-libp2p", "🔍 Discovered new external address for our node: {}", new_addr, diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index aeed2985ace4..4c91d09e4fbc 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -146,10 +146,6 @@ impl PeersClient { self.backend.clone() } - pub fn as_block_import(&self) -> BlockImportAdapter { - BlockImportAdapter::new(self.clone()) - } - pub fn get_aux(&self, key: &[u8]) -> ClientResult>> { self.client.get_aux(key) } @@ -231,7 +227,7 @@ pub struct Peer { verifier: VerifierAdapter, /// We keep a copy of the block_import so that we can invoke it for locally-generated blocks, /// instead of going through the import queue. - block_import: BlockImportAdapter, + block_import: BlockImport, select_chain: Option>, backend: Option>, network: NetworkWorker::Hash>, @@ -559,55 +555,6 @@ where } } -pub trait BlockImportAdapterFull: - BlockImport + Send + Sync + Clone -{ -} - -impl BlockImportAdapterFull for T where - T: BlockImport + Send + Sync + Clone -{ -} - -/// Implements `BlockImport` for any `Transaction`. Internally the transaction is -/// "converted", aka the field is set to `None`. -/// -/// This is required as the `TestNetFactory` trait does not distinguish between -/// full and light nodes. -#[derive(Clone)] -pub struct BlockImportAdapter { - inner: I, -} - -impl BlockImportAdapter { - /// Create a new instance of `Self::Full`. - pub fn new(inner: I) -> Self { - Self { inner } - } -} - -#[async_trait::async_trait] -impl BlockImport for BlockImportAdapter -where - I: BlockImport + Send + Sync, -{ - type Error = ConsensusError; - - async fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.inner.check_block(block).await - } - - async fn import_block( - &mut self, - block: BlockImportParams, - ) -> Result { - self.inner.import_block(block).await - } -} - /// Implements `Verifier` and keeps track of failed verifications. struct VerifierAdapter { verifier: Arc>>>, @@ -725,11 +672,7 @@ pub trait TestNetFactory: Default + Sized + Send { fn make_block_import( &self, client: PeersClient, - ) -> ( - BlockImportAdapter, - Option>, - Self::PeerData, - ); + ) -> (Self::BlockImport, Option>, Self::PeerData); /// Create new test network with this many peers. 
fn new(n: usize) -> Self { @@ -1145,12 +1088,8 @@ impl TestNetFactory for TestNet { fn make_block_import( &self, client: PeersClient, - ) -> ( - BlockImportAdapter, - Option>, - Self::PeerData, - ) { - (client.as_block_import(), None, ()) + ) -> (Self::BlockImport, Option>, Self::PeerData) { + (client, None, ()) } fn peer(&mut self, i: usize) -> &mut Peer<(), Self::BlockImport> { @@ -1226,11 +1165,7 @@ impl TestNetFactory for JustificationTestNet { fn make_block_import( &self, client: PeersClient, - ) -> ( - BlockImportAdapter, - Option>, - Self::PeerData, - ) { - (client.as_block_import(), Some(Box::new(ForceFinalized(client))), Default::default()) + ) -> (Self::BlockImport, Option>, Self::PeerData) { + (client.clone(), Some(Box::new(ForceFinalized(client))), Default::default()) } } diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs index 86b5c7c61fcd..1b2b6b6b12bf 100644 --- a/substrate/client/service/src/client/call_executor.rs +++ b/substrate/client/service/src/client/call_executor.rs @@ -220,14 +220,10 @@ where match recorder { Some(recorder) => { - let trie_state = state.as_trie_backend(); - - let backend = sp_state_machine::TrieBackendBuilder::wrap(&trie_state) - .with_recorder(recorder.clone()) - .build(); - + let backend = state.as_trie_backend(); + let backend = backend.with_temp_recorder(recorder.clone()); let mut state_machine = StateMachine::new( - &backend, + &*backend, changes, &self.executor, method, @@ -275,7 +271,6 @@ where let at_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(at_hash))?; let state = self.backend.state_at(at_hash)?; - let trie_backend = state.as_trie_backend(); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend); diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 35e8b53a09cf..56f8b947569c 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -630,7 +630,7 @@ where let storage_changes = match storage_changes { sc_consensus::StorageChanges::Changes(storage_changes) => { self.backend.begin_state_operation(&mut operation.op, parent_hash)?; - let (main_sc, child_sc, offchain_sc, tx, _, tx_index) = + let (main_sc, child_sc, offchain_sc, tx, tx_index) = storage_changes.into_inner(); if self.config.offchain_indexing_api { @@ -884,7 +884,7 @@ where .into_storage_changes(&state, *parent_hash) .map_err(sp_blockchain::Error::Storage)?; - if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root + if import_block.header.state_root() != &gen_storage_changes.transaction.root_hash() { return Err(Error::InvalidStateRoot) } @@ -1270,7 +1270,7 @@ where ) -> sp_blockchain::Result<(CompactProof, u32)> { let state = self.state_at(hash)?; // this is a read proof, using version V0 or V1 is equivalent. 
- let root = state.storage_root(std::iter::empty(), StateVersion::V0).0; + let root = state.storage_root(std::iter::empty(), StateVersion::V0).root_hash(); let (proof, count) = prove_range_read_with_child_with_size::<_, HashingFor>( state, size_limit, start_key, @@ -1401,13 +1401,13 @@ ) -> sp_blockchain::Result<(KeyValueStates, usize)> { let mut db = sp_state_machine::MemoryDB::>::new(&[]); // Compact encoding - let _ = sp_trie::decode_compact::>, _, _>( + let _ = sp_trie::decode_compact::, ()>, _>( &mut db, proof.iter_compact_encoded_nodes(), Some(&root), ) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; - let proving_backend = sp_state_machine::TrieBackendBuilder::new(db, root).build(); + let proving_backend = sp_state_machine::TrieBackendBuilder::new(Box::new(db), root).build(); let state = read_range_proof_check_with_child_on_proving_backend::>( &proving_backend, start_key, diff --git a/substrate/client/state-db/src/lib.rs b/substrate/client/state-db/src/lib.rs index 41c231c31aaf..c0f7df744c6a 100644 --- a/substrate/client/state-db/src/lib.rs +++ b/substrate/client/state-db/src/lib.rs @@ -537,7 +537,21 @@ impl StateDb { ref_counting: bool, should_init: bool, ) -> Result<(CommitSet, StateDb), Error> { - let stored_mode = fetch_stored_pruning_mode(&db)?; + let (db_init_commit_set, selected_mode) = + Self::open_meta(&db, requested_mode, should_init)?; + let state_db = + StateDb { db: RwLock::new(StateDbSync::new(selected_mode, ref_counting, db)?) }; + + Ok((db_init_commit_set, state_db)) + } + + /// Read or initialize metadata. + pub fn open_meta( + db: &D, + requested_mode: Option, + should_init: bool, + ) -> Result<(CommitSet, PruningMode), Error> { + let stored_mode = fetch_stored_pruning_mode(db)?; let selected_mode = match (should_init, stored_mode, requested_mode) { (true, stored_mode, requested_mode) => { @@ -568,11 +582,7 @@ } else { Default::default() }; - - let state_db = - StateDb { db: RwLock::new(StateDbSync::new(selected_mode, ref_counting, db)?) }; - - Ok((db_init_commit_set, state_db)) + Ok((db_init_commit_set, selected_mode)) } pub fn pruning_mode(&self) -> PruningMode { diff --git a/substrate/client/statement-store/Cargo.toml b/substrate/client/statement-store/Cargo.toml index 676f6cb36f67..afc07fd03c4e 100644 --- a/substrate/client/statement-store/Cargo.toml +++ b/substrate/client/statement-store/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } parking_lot = "0.12.1" -parity-db = "0.4.12" +parity-db = "0.4.13" tokio = { version = "1.22.0", features = ["time"] } sp-statement-store = { path = "../../primitives/statement-store" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } diff --git a/substrate/frame/message-queue/src/integration_test.rs b/substrate/frame/message-queue/src/integration_test.rs index ce8eb80805ab..6c8d69db1d3c 100644 --- a/substrate/frame/message-queue/src/integration_test.rs +++ b/substrate/frame/message-queue/src/integration_test.rs @@ -80,15 +80,15 @@ impl Config for Test { /// # Example output /// /// ```pre -/// Enqueued 1189 messages across 176 queues. Payload 46.97 KiB -/// Processing 772 of 1189 messages -/// Enqueued 9270 messages across 1559 queues. Payload 131.85 KiB -/// Processing 6262 of 9687 messages -/// Enqueued 5025 messages across 1225 queues. Payload 100.23 KiB -/// Processing 1739 of 8450 messages -/// Enqueued 42061 messages across 6357 queues.
Payload 536.29 KiB -/// Processing 11675 of 48772 messages -/// Enqueued 20253 messages across 2420 queues. Payload 288.34 KiB +/// Enqueued 1189 messages across 176 queues. Payload 46.97 KiB +/// Processing 772 of 1189 messages +/// Enqueued 9270 messages across 1559 queues. Payload 131.85 KiB +/// Processing 6262 of 9687 messages +/// Enqueued 5025 messages across 1225 queues. Payload 100.23 KiB +/// Processing 1739 of 8450 messages +/// Enqueued 42061 messages across 6357 queues. Payload 536.29 KiB +/// Processing 11675 of 48772 messages +/// Enqueued 20253 messages across 2420 queues. Payload 288.34 KiB /// Processing 28711 of 57350 messages /// Processing all remaining 28639 messages /// ``` @@ -194,14 +194,14 @@ fn stress_test_recursive() { /// # Example output /// /// ```pre -/// Enqueued 11776 messages across 2526 queues. Payload 173.94 KiB -/// Suspended 63 and resumed 7 queues of 2526 in total -/// Processing 593 messages. Resumed msgs: 11599, All msgs: 11776 -/// Enqueued 30104 messages across 5533 queues. Payload 416.62 KiB -/// Suspended 24 and resumed 15 queues of 5533 in total -/// Processing 12841 messages. Resumed msgs: 40857, All msgs: 41287 -/// Processing all 28016 remaining resumed messages -/// Resumed all 64 suspended queues +/// Enqueued 11776 messages across 2526 queues. Payload 173.94 KiB +/// Suspended 63 and resumed 7 queues of 2526 in total +/// Processing 593 messages. Resumed msgs: 11599, All msgs: 11776 +/// Enqueued 30104 messages across 5533 queues. Payload 416.62 KiB +/// Suspended 24 and resumed 15 queues of 5533 in total +/// Processing 12841 messages. Resumed msgs: 40857, All msgs: 41287 +/// Processing all 28016 remaining resumed messages +/// Resumed all 64 suspended queues /// Processing all remaining 430 messages /// ``` #[test] diff --git a/substrate/frame/session/src/historical/mod.rs b/substrate/frame/session/src/historical/mod.rs index b9cecea1a7f7..8018baa0eeeb 100644 --- a/substrate/frame/session/src/historical/mod.rs +++ b/substrate/frame/session/src/historical/mod.rs @@ -39,8 +39,8 @@ use sp_session::{MembershipProof, ValidatorCount}; use sp_staking::SessionIndex; use sp_std::prelude::*; use sp_trie::{ - trie_types::{TrieDBBuilder, TrieDBMutBuilderV0}, - LayoutV0, MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, + trie_types::{TrieDBBuilderV0, TrieDBMutBuilderV0}, + LayoutV0, MemoryDB, Recorder, Trie, EMPTY_PREFIX, }; use frame_support::{ @@ -232,44 +232,38 @@ impl ProvingTrie { I: IntoIterator, { let mut db = MemoryDB::default(); - let mut root = Default::default(); + let mut trie = TrieDBMutBuilderV0::new(&mut db).build(); + for (i, (validator, full_id)) in validators.into_iter().enumerate() { + let i = i as u32; + let keys = match >::load_keys(&validator) { + None => continue, + Some(k) => k, + }; - { - let mut trie = TrieDBMutBuilderV0::new(&mut db, &mut root).build(); - for (i, (validator, full_id)) in validators.into_iter().enumerate() { - let i = i as u32; - let keys = match >::load_keys(&validator) { - None => continue, - Some(k) => k, - }; - - let full_id = (validator, full_id); - - // map each key to the owner index. - for key_id in T::Keys::key_ids() { - let key = keys.get_raw(*key_id); - let res = - (key_id, key).using_encoded(|k| i.using_encoded(|v| trie.insert(k, v))); - - let _ = res.map_err(|_| "failed to insert into trie")?; - } - - // map each owner index to the full identification. 
- let _ = i - .using_encoded(|k| full_id.using_encoded(|v| trie.insert(k, v))) - .map_err(|_| "failed to insert into trie")?; + let full_id = (validator, full_id); + + // map each key to the owner index. + for key_id in T::Keys::key_ids() { + let key = keys.get_raw(*key_id); + let res = (key_id, key).using_encoded(|k| i.using_encoded(|v| trie.insert(k, v))); + + let _ = res.map_err(|_| "failed to insert into trie")?; } + + // map each owner index to the full identification. + let _ = i + .using_encoded(|k| full_id.using_encoded(|v| trie.insert(k, v))) + .map_err(|_| "failed to insert into trie")?; } + let root = trie.commit().apply_to(&mut db); Ok(ProvingTrie { db, root }) } fn from_nodes(root: T::Hash, nodes: &[Vec]) -> Self { - use sp_trie::HashDBT; - let mut memory_db = MemoryDB::default(); for node in nodes { - HashDBT::insert(&mut memory_db, EMPTY_PREFIX, &node[..]); + memory_db.insert(EMPTY_PREFIX, &node[..]); } ProvingTrie { db: memory_db, root } @@ -277,10 +271,11 @@ impl ProvingTrie { /// Prove the full verification data for a given key and key ID. pub fn prove(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option>> { - let mut recorder = Recorder::>::new(); + let mut recorder = Recorder::>::new(); { - let trie = - TrieDBBuilder::new(&self.db, &self.root).with_recorder(&mut recorder).build(); + let trie = TrieDBBuilderV0::::new(&self.db, &self.root) + .with_recorder(&mut recorder) + .build(); let val_idx = (key_id, key_data).using_encoded(|s| { trie.get(s).ok()?.and_then(|raw| u32::decode(&mut &*raw).ok()) })?; @@ -300,10 +295,10 @@ impl ProvingTrie { &self.root } - // Check a proof contained within the current memory-db. Returns `None` if the + // Check a proof contained within the current `MemoryDB`. Returns `None` if the // nodes within the current `MemoryDB` are insufficient to query the item. fn query(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option> { - let trie = TrieDBBuilder::new(&self.db, &self.root).build(); + let trie = TrieDBBuilderV0::::new(&self.db, &self.root).build(); let val_idx = (key_id, key_data) .using_encoded(|s| trie.get(s)) .ok()? 
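The `historical` hunks above show the central API change of this migration: with `subtrie`, `TrieDBMutBuilder` no longer takes a `&mut root` out-parameter; the built trie accumulates changes, and `commit()` returns a changeset whose `apply_to(&mut db)` writes the nodes into the backing `MemoryDB` and returns the new root. A minimal standalone sketch of that flow, assuming `BlakeTwo256` as the hasher and the `sp_trie` re-exports used in the hunks above (`build_small_trie` is a hypothetical helper, not part of this patch):

use sp_core::H256;
use sp_runtime::traits::BlakeTwo256;
use sp_trie::{trie_types::TrieDBMutBuilderV0, MemoryDB};

// Build a one-entry trie and commit it, returning the backing db and the root.
fn build_small_trie() -> (MemoryDB<BlakeTwo256>, H256) {
	let mut db = MemoryDB::<BlakeTwo256>::default();
	// The builder only borrows the db; there is no `&mut root` out-parameter
	// anymore, so the extra `{ .. }` scope around the mutable trie goes away.
	let mut trie = TrieDBMutBuilderV0::new(&mut db).build();
	trie.insert(b"key", b"value").expect("failed to insert into trie");
	// `commit()` consumes the trie and yields the changeset; `apply_to`
	// writes it into the db and returns the new root hash, the same
	// pattern as in `ProvingTrie::generate_for` above.
	let root = trie.commit().apply_to(&mut db);
	(db, root)
}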
diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index 6b3aa9934e07..daf0670e8562 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -1722,7 +1722,8 @@ pub(crate) mod remote_tests { ext.state_version = sp_core::storage::StateVersion::V1; let status = - substrate_state_trie_migration_rpc::migration_status(&ext.as_backend()).unwrap(); + substrate_state_trie_migration_rpc::migration_status(&ext.as_backend().unwrap()) + .unwrap(); assert!( status.top_remaining_to_migrate > 0, "no node needs migrating, this probably means that state was initialized with `StateVersion::V1`", @@ -1783,7 +1784,8 @@ pub(crate) mod remote_tests { }); let status = - substrate_state_trie_migration_rpc::migration_status(&ext.as_backend()).unwrap(); + substrate_state_trie_migration_rpc::migration_status(&ext.as_backend().unwrap()) + .unwrap(); assert_eq!(status.top_remaining_to_migrate, 0); assert_eq!(status.child_remaining_to_migrate, 0); } diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index f4b1d13c5203..de4865a6be30 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -26,7 +26,7 @@ sp-externalities = { path = "../externalities", default-features = false, option sp-version = { path = "../version", default-features = false } sp-state-machine = { path = "../state-machine", default-features = false, optional = true } sp-trie = { path = "../trie", default-features = false, optional = true } -hash-db = { version = "0.16.0", optional = true } +trie-db = { package = "subtrie", version = "0.0.1", optional = true } thiserror = { optional = true, workspace = true } scale-info = { version = "2.10.0", default-features = false, features = [ "derive", @@ -41,7 +41,6 @@ sp-test-primitives = { path = "../test-primitives" } default = ["std"] std = [ "codec/std", - "hash-db", "log/std", "scale-info/std", "sp-api-proc-macro/std", @@ -57,6 +56,7 @@ std = [ "sp-trie/std", "sp-version/std", "thiserror", + "trie-db", ] # Special feature to disable logging completely. 
# diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index b7e5600a017a..c48d1fac6349 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -337,7 +337,7 @@ fn generate_runtime_api_base_structures() -> Result { ) -> std::option::Option<#crate_::StorageProof> { let recorder = std::option::Option::take(&mut self.recorder); std::option::Option::map(recorder, |recorder| { - #crate_::ProofRecorder::::drain_storage_proof(recorder) + #crate_::ProofRecorder::::drain_storage_proof(&recorder) }) } diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index a945b9f21f3c..9bb65ebf40f3 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -77,7 +77,6 @@ extern crate self as sp_api; pub mod __private { #[cfg(feature = "std")] mod std_imports { - pub use hash_db::Hasher; pub use sp_core::traits::CallContext; pub use sp_externalities::{Extension, Extensions}; pub use sp_runtime::StateVersion; @@ -85,6 +84,7 @@ pub mod __private { Backend as StateBackend, InMemoryBackend, OverlayedChanges, StorageProof, TrieBackend, TrieBackendBuilder, }; + pub use trie_db::node_db::Hasher; } #[cfg(feature = "std")] pub use std_imports::*; @@ -124,6 +124,8 @@ use sp_runtime::{traits::Block as BlockT, ExtrinsicInclusionMode}; pub use sp_state_machine::StorageProof; #[cfg(feature = "std")] use sp_state_machine::{backend::AsTrieBackend, Backend as StateBackend, OverlayedChanges}; +#[cfg(feature = "std")] +use sp_trie::DBLocation; use sp_version::RuntimeVersion; #[cfg(feature = "std")] use std::cell::RefCell; @@ -517,7 +519,7 @@ pub use sp_api_proc_macro::mock_impl_runtime_apis; /// A type that records all accessed trie nodes and generates a proof out of it. 
#[cfg(feature = "std")] -pub type ProofRecorder = sp_trie::recorder::Recorder>; +pub type ProofRecorder = sp_trie::recorder::Recorder, DBLocation>; #[cfg(feature = "std")] pub type StorageChanges = sp_state_machine::StorageChanges>; diff --git a/substrate/primitives/consensus/babe/src/digests.rs b/substrate/primitives/consensus/babe/src/digests.rs index afc967e3af39..e7af8c5763a0 100644 --- a/substrate/primitives/consensus/babe/src/digests.rs +++ b/substrate/primitives/consensus/babe/src/digests.rs @@ -22,9 +22,10 @@ use super::{ BabeEpochConfiguration, Randomness, Slot, BABE_ENGINE_ID, }; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; use sp_core::sr25519::vrf::VrfSignature; use sp_runtime::{DigestItem, RuntimeDebug}; -use sp_std::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/substrate/primitives/consensus/babe/src/lib.rs b/substrate/primitives/consensus/babe/src/lib.rs index ff0b4568226e..6eb75b270a02 100644 --- a/substrate/primitives/consensus/babe/src/lib.rs +++ b/substrate/primitives/consensus/babe/src/lib.rs @@ -20,15 +20,18 @@ #![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + pub mod digests; pub mod inherents; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use sp_runtime::{traits::Header, ConsensusEngineId, RuntimeDebug}; -use sp_std::vec::Vec; use crate::digests::{NextConfigDescriptor, NextEpochDescriptor}; diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 908f2498de53..b3f9181260a3 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -23,7 +23,7 @@ serde = { optional = true, features = ["alloc", "derive"], workspace = true } bounded-collections = { version = "0.2.0", default-features = false } primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info"] } impl-serde = { version = "0.4.0", default-features = false, optional = true } -hash-db = { version = "0.16.0", default-features = false } +trie-db = { package = "subtrie", version = "0.0.1", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } bs58 = { version = "0.5.0", default-features = false, optional = true } rand = { version = "0.8.5", features = ["small_rng"], optional = true } @@ -94,7 +94,6 @@ std = [ "full_crypto", "futures", "futures/thread-pool", - "hash-db/std", "hash256-std-hasher/std", "impl-serde/std", "itertools", @@ -123,6 +122,7 @@ std = [ "substrate-bip39/std", "thiserror", "tracing", + "trie-db/std", "w3f-bls?/std", "zeroize/alloc", "zeroize/std", diff --git a/substrate/primitives/core/src/hasher.rs b/substrate/primitives/core/src/hasher.rs index fc613dba161e..f4754cddc0d6 100644 --- a/substrate/primitives/core/src/hasher.rs +++ b/substrate/primitives/core/src/hasher.rs @@ -20,7 +20,7 @@ pub mod blake2 { use crate::hash::H256; use hash256_std_hasher::Hash256StdHasher; - use hash_db::Hasher; + use trie_db::node_db::Hasher; /// Concrete implementation of Hasher using Blake2b 256-bit hashes #[derive(Debug)] @@ -40,7 +40,7 @@ pub mod blake2 { pub mod keccak { use crate::hash::H256; use hash256_std_hasher::Hash256StdHasher; - use hash_db::Hasher; + use trie_db::node_db::Hasher; /// Concrete implementation of Hasher using Keccak 256-bit hashes #[derive(Debug)] diff --git 
a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index c0b41234460d..235eec74027a 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -91,7 +91,7 @@ pub use crypto::{ByteArray, DeriveJunction, Pair, Public}; pub use self::hasher::blake2::Blake2Hasher; #[cfg(feature = "std")] pub use self::hasher::keccak::KeccakHasher; -pub use hash_db::Hasher; +pub use trie_db::node_db::Hasher; pub use bounded_collections as bounded; #[cfg(feature = "std")] diff --git a/substrate/primitives/database/Cargo.toml b/substrate/primitives/database/Cargo.toml index 081aad607584..b3e5d2e1dab9 100644 --- a/substrate/primitives/database/Cargo.toml +++ b/substrate/primitives/database/Cargo.toml @@ -16,3 +16,5 @@ workspace = true [dependencies] kvdb = "0.13.0" parking_lot = "0.12.1" +trie-db = { package = "subtrie", version = "0.0.1", default-features = false } +parity-db = "0.4.13" diff --git a/substrate/primitives/database/src/kvdb.rs b/substrate/primitives/database/src/kvdb.rs index 735813c36857..d56e8278a3a7 100644 --- a/substrate/primitives/database/src/kvdb.rs +++ b/substrate/primitives/database/src/kvdb.rs @@ -18,7 +18,7 @@ /// A wrapper around `kvdb::Database` that implements `sp_database::Database` trait use ::kvdb::{DBTransaction, KeyValueDB}; -use crate::{error, Change, ColumnId, Database, Transaction}; +use crate::{error, Change, ColumnId, DBLocation, Database, Transaction}; struct DbAdapter(D); @@ -82,7 +82,7 @@ impl> Database for DbAdapter { tx.put_vec(col, key.as_ref(), value); }, }, - Change::Reference(col, key) => { + Change::Reference(col, key) | Change::ReferenceTree(col, key) => { if let (counter_key, Some(mut counter)) = self.read_counter(col, key.as_ref())? { @@ -90,7 +90,7 @@ tx.put(col, &counter_key, &counter.to_le_bytes()); } }, - Change::Release(col, key) => { + Change::Release(col, key) | Change::ReleaseTree(col, key) => { if let (counter_key, Some(mut counter)) = self.read_counter(col, key.as_ref())? { @@ -103,6 +103,9 @@ } } }, + Change::StoreTree(_col, _key, _tree) => { + unimplemented!("StoreTree is not supported by kvdb"); + }, } } self.0.write(tx).map_err(|e| error::DatabaseError(Box::new(e))) @@ -115,4 +118,13 @@ fn contains(&self, col: ColumnId, key: &[u8]) -> bool { handle_err(self.0.has_key(col, key)) } + + fn get_node( + &self, + _col: ColumnId, + _key: &[u8], + _location: DBLocation, + ) -> Option<(Vec, Vec)> { + unreachable!("kvdb does not support multi tree") + } } diff --git a/substrate/primitives/database/src/lib.rs b/substrate/primitives/database/src/lib.rs index 42920bbefb49..901c1235d3be 100644 --- a/substrate/primitives/database/src/lib.rs +++ b/substrate/primitives/database/src/lib.rs @@ -23,25 +23,33 @@ mod mem; pub use crate::kvdb::as_database; pub use mem::MemDb; +pub use parity_db::{NewNode, NodeAddress, NodeRef}; /// An identifier for a column. pub type ColumnId = u32; +/// Node location hint. +pub type DBLocation = u64; /// An alteration to the database. -#[derive(Clone)] pub enum Change { Set(ColumnId, Vec, Vec), Remove(ColumnId, Vec), Store(ColumnId, H, Vec), Reference(ColumnId, H), Release(ColumnId, H), + StoreTree(ColumnId, H, NewTree), + ReferenceTree(ColumnId, H), + ReleaseTree(ColumnId, H), } /// A series of changes to the database that can be committed atomically. They do not take effect /// until passed into `Database::commit`.
-#[derive(Default, Clone)] +#[derive(Default)] pub struct Transaction(pub Vec>); +pub type NewTree = parity_db::NewNode; + impl Transaction { /// Create a new transaction to be prepared and committed atomically. pub fn new() -> Self { @@ -69,12 +77,49 @@ pub fn reference(&mut self, col: ColumnId, hash: H) { self.0.push(Change::Reference(col, hash)) } + /// Increase the number of references for the tree at `hash` in the database. + pub fn reference_tree(&mut self, col: ColumnId, hash: H) { + self.0.push(Change::ReferenceTree(col, hash)) + } /// Release the preimage of `hash` from the database. An equal number of these to the number of /// corresponding `store`s must have been given before it is legal for `Database::get` to /// be unable to provide the preimage. pub fn release(&mut self, col: ColumnId, hash: H) { self.0.push(Change::Release(col, hash)) } + + /// Release the tree at `hash` from the database. An equal number of these to the number of + /// corresponding `insert_tree`s must have been given before it is legal for `Database::get` to + /// be unable to provide it. + pub fn release_tree(&mut self, col: ColumnId, hash: H) { + self.0.push(Change::ReleaseTree(col, hash)) + } + + /// Insert a new tree into the database. + pub fn insert_tree(&mut self, col: ColumnId, hash: H, tree: NewTree) { + self.0.push(Change::StoreTree(col, hash, tree)) + } +} + +/// Specific capabilities of databases. +#[derive(Clone, Copy, PartialEq, Eq)] +pub enum StateCapabilities { + /// Allow direct storage of tree nodes. + TreeColumn, + /// Reference counted storage is supported. + RefCounted, + /// Nothing specific, will require key prefixing. + None, +} + +impl StateCapabilities { + /// Whether the database needs key prefixing. + pub fn needs_key_prefixing(self) -> bool { + match self { + StateCapabilities::None => true, + _ => false, + } + } } pub trait Database>: Send + Sync { @@ -106,17 +151,21 @@ } } - /// Check if database supports internal ref counting for state data. + /// Capabilities for state data. /// - /// For backwards compatibility returns `false` by default. - fn supports_ref_counting(&self) -> bool { - false + /// For backwards compatibility returns `StateCapabilities::None` by default. + fn state_capabilities(&self) -> StateCapabilities { + StateCapabilities::None } - /// Remove a possible path-prefix from the key. - /// - /// Not all database implementations use a prefix for keys, so this function may be a noop. - fn sanitize_key(&self, _key: &mut Vec) {} + /// Retrieve the tree node previously stored against `key` and `location`, or `None` if + /// no such node exists. + fn get_node( + &self, + col: ColumnId, + key: &[u8], + location: DBLocation, + ) -> Option<(Vec, Vec)>; } impl std::fmt::Debug for dyn Database { diff --git a/substrate/primitives/database/src/mem.rs b/substrate/primitives/database/src/mem.rs index 71ba7a992763..4a32e158d0a7 100644 --- a/substrate/primitives/database/src/mem.rs +++ b/substrate/primitives/database/src/mem.rs @@ -17,7 +17,7 @@ //!
In-memory implementation of `Database` -use crate::{error, Change, ColumnId, Database, Transaction}; +use crate::{error, Change, ColumnId, DBLocation, Database, Transaction}; use parking_lot::RwLock; use std::collections::{hash_map::Entry, HashMap}; @@ -46,14 +46,13 @@ where .and_modify(|(c, _)| *c += 1) .or_insert_with(|| (1, value)); }, - Change::Reference(col, hash) => { + Change::Reference(col, hash) | Change::ReferenceTree(col, hash) => if let Entry::Occupied(mut entry) = s.entry(col).or_default().entry(hash.as_ref().to_vec()) { entry.get_mut().0 += 1; - } - }, - Change::Release(col, hash) => { + }, + Change::Release(col, hash) | Change::ReleaseTree(col, hash) => if let Entry::Occupied(mut entry) = s.entry(col).or_default().entry(hash.as_ref().to_vec()) { @@ -61,7 +60,9 @@ where if entry.get().0 == 0 { entry.remove(); } - } + }, + Change::StoreTree(_, _, _) => { + unimplemented!("MemDb does not support tree storage"); }, } } @@ -73,6 +74,17 @@ where let s = self.0.read(); s.get(&col).and_then(|c| c.get(key).map(|(_, v)| v.clone())) } + + fn get_node( + &self, + col: ColumnId, + key: &[u8], + _location: DBLocation, + ) -> Option<(Vec, Vec)> { + let s = self.0.read(); + s.get(&col) + .and_then(|c| c.get(key).map(|(_, v)| (v.clone(), Default::default()))) + } } impl MemDb { diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index 684854ea5c8d..b41221f04dd6 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -563,63 +563,63 @@ pub trait DefaultChildStorage { pub trait Trie { /// A trie root formed from the iterated items. fn blake2_256_root(input: Vec<(Vec, Vec)>) -> H256 { - LayoutV0::::trie_root(input) + LayoutV0::::trie_root(input) } /// A trie root formed from the iterated items. #[version(2)] fn blake2_256_root(input: Vec<(Vec, Vec)>, version: StateVersion) -> H256 { match version { - StateVersion::V0 => LayoutV0::::trie_root(input), - StateVersion::V1 => LayoutV1::::trie_root(input), + StateVersion::V0 => LayoutV0::::trie_root(input), + StateVersion::V1 => LayoutV1::::trie_root(input), } } /// A trie root formed from the enumerated items. fn blake2_256_ordered_root(input: Vec>) -> H256 { - LayoutV0::::ordered_trie_root(input) + LayoutV0::::ordered_trie_root(input) } /// A trie root formed from the enumerated items. #[version(2)] fn blake2_256_ordered_root(input: Vec>, version: StateVersion) -> H256 { match version { - StateVersion::V0 => LayoutV0::::ordered_trie_root(input), - StateVersion::V1 => LayoutV1::::ordered_trie_root(input), + StateVersion::V0 => LayoutV0::::ordered_trie_root(input), + StateVersion::V1 => LayoutV1::::ordered_trie_root(input), } } /// A trie root formed from the iterated items. fn keccak_256_root(input: Vec<(Vec, Vec)>) -> H256 { - LayoutV0::::trie_root(input) + LayoutV0::::trie_root(input) } /// A trie root formed from the iterated items. #[version(2)] fn keccak_256_root(input: Vec<(Vec, Vec)>, version: StateVersion) -> H256 { match version { - StateVersion::V0 => LayoutV0::::trie_root(input), - StateVersion::V1 => LayoutV1::::trie_root(input), + StateVersion::V0 => LayoutV0::::trie_root(input), + StateVersion::V1 => LayoutV1::::trie_root(input), } } /// A trie root formed from the enumerated items. fn keccak_256_ordered_root(input: Vec>) -> H256 { - LayoutV0::::ordered_trie_root(input) + LayoutV0::::ordered_trie_root(input) } /// A trie root formed from the enumerated items. 
#[version(2)] fn keccak_256_ordered_root(input: Vec>, version: StateVersion) -> H256 { match version { - StateVersion::V0 => LayoutV0::::ordered_trie_root(input), - StateVersion::V1 => LayoutV1::::ordered_trie_root(input), + StateVersion::V0 => LayoutV0::::ordered_trie_root(input), + StateVersion::V1 => LayoutV1::::ordered_trie_root(input), } } /// Verify trie proof fn blake2_256_verify_proof(root: H256, proof: &[Vec], key: &[u8], value: &[u8]) -> bool { - sp_trie::verify_trie_proof::, _, _, _>( + sp_trie::verify_trie_proof::, _, _, _>( &root, proof, &[(key, Some(value))], @@ -638,14 +638,14 @@ ) -> bool { match version { StateVersion::V0 => sp_trie::verify_trie_proof::< - LayoutV0, + LayoutV0, _, _, _, >(&root, proof, &[(key, Some(value))]) .is_ok(), StateVersion::V1 => sp_trie::verify_trie_proof::< - LayoutV1, + LayoutV1, _, _, _, @@ -656,7 +656,7 @@ /// Verify trie proof fn keccak_256_verify_proof(root: H256, proof: &[Vec], key: &[u8], value: &[u8]) -> bool { - sp_trie::verify_trie_proof::, _, _, _>( + sp_trie::verify_trie_proof::, _, _, _>( &root, proof, &[(key, Some(value))], @@ -675,14 +675,14 @@ ) -> bool { match version { StateVersion::V0 => sp_trie::verify_trie_proof::< - LayoutV0, + LayoutV0, _, _, _, >(&root, proof, &[(key, Some(value))]) .is_ok(), StateVersion::V1 => sp_trie::verify_trie_proof::< - LayoutV1, + LayoutV1, _, _, _, diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index 09994f1ae91e..132aef84c07d 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -18,7 +18,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -hash-db = { version = "0.16.0", default-features = false } log = { workspace = true } parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.8.5", optional = true } @@ -30,7 +29,7 @@ sp-externalities = { path = "../externalities", default-features = false } sp-panic-handler = { path = "../panic-handler", optional = true } sp-std = { path = "../std", default-features = false } sp-trie = { path = "../trie", default-features = false } -trie-db = { version = "0.28.0", default-features = false } +trie-db = { package = "subtrie", version = "0.0.1", default-features = false } [dev-dependencies] array-bytes = "6.1" @@ -43,7 +42,6 @@ assert_matches = "1.5" default = ["std"] std = [ "codec/std", - "hash-db/std", "log/std", "parking_lot", "rand", diff --git a/substrate/primitives/state-machine/src/backend.rs b/substrate/primitives/state-machine/src/backend.rs index ea9cd442d70b..79bd40452388 100644 --- a/substrate/primitives/state-machine/src/backend.rs +++ b/substrate/primitives/state-machine/src/backend.rs @@ -19,18 +19,21 @@ #[cfg(feature = "std")] use crate::trie_backend::TrieBackend; -use crate::{ - trie_backend_essence::TrieBackendStorage, ChildStorageCollection, StorageCollection, - StorageKey, StorageValue, UsageInfo, -}; +use crate::{ChildStorageCollection, StorageCollection, StorageKey, StorageValue, UsageInfo}; +use alloc::{boxed::Box, vec::Vec}; use codec::Encode; use core::marker::PhantomData; -use hash_db::Hasher; use sp_core::storage::{ChildInfo, StateVersion, TrackedStorageKey}; #[cfg(feature = "std")] use sp_core::traits::RuntimeCode; -use sp_std::vec::Vec; -use sp_trie::{MerkleValue, PrefixedMemoryDB}; +use trie_db::node_db::Hasher; + +// TODO: the location type should be
parameterized from the db, +// as we currently add overhead to rocksdb and old paritydb in the +// struct size (not that much, not a priority) +/// DB location hint for a trie node. +pub type DBLocation = sp_trie::DBLocation; +use sp_trie::{ChildChangeset, MerkleValue}; /// A struct containing arguments for iterating over the storage. #[derive(Default)] @@ -173,7 +176,7 @@ where /// /// This transaction contains all the changes that need to be applied to the backend to create the /// state for a new block. -pub type BackendTransaction = PrefixedMemoryDB; +pub type BackendTransaction = trie_db::Changeset; /// A state backend is used to read state data and can have changes committed /// to it. @@ -183,9 +186,6 @@ pub trait Backend: sp_std::fmt::Debug { /// An error type when fetching data is not possible. type Error: super::Error; - /// Type of trie backend storage. - type TrieBackendStorage: TrieBackendStorage; - /// Type of the raw storage iterator. type RawIter: StorageIterator; @@ -248,9 +248,9 @@ /// Does not include child storage updates. fn storage_root<'a>( &self, - delta: impl Iterator)>, + delta: impl Iterator, Option>)>, state_version: StateVersion, - ) -> (H::Out, BackendTransaction) + ) -> BackendTransaction where H::Out: Ord; @@ -262,7 +262,7 @@ child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, bool, BackendTransaction) + ) -> (BackendTransaction, bool) where H::Out: Ord; @@ -297,33 +297,36 @@ Item = (&'a ChildInfo, impl Iterator)>), >, state_version: StateVersion, - ) -> (H::Out, BackendTransaction) + ) -> BackendTransaction where H::Out: Ord + Encode, { - let mut txs = BackendTransaction::default(); - let mut child_roots: Vec<_> = Default::default(); + let mut child_roots = Vec::with_capacity(child_deltas.size_hint().0); // child first for (child_info, child_delta) in child_deltas { - let (child_root, empty, child_txs) = + let (child_commit, empty) = self.child_storage_root(child_info, child_delta, state_version); let prefixed_storage_key = child_info.prefixed_storage_key(); - txs.consolidate(child_txs); if empty { - child_roots.push((prefixed_storage_key.into_inner(), None)); + child_roots.push((prefixed_storage_key.into_inner(), None, None)); } else { - child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); + let root = child_commit.root_hash(); + child_roots.push(( + prefixed_storage_key.into_inner(), + Some(root.encode()), + Some(Box::new(child_commit)), + )); } } - let (root, parent_txs) = self.storage_root( - delta - .map(|(k, v)| (k, v.as_ref().map(|v| &v[..]))) - .chain(child_roots.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))), + self.storage_root( + delta.map(|(k, v)| (k, v.as_ref().map(|v| &v[..]), None)).chain( + child_roots.iter_mut().map(|(k, r, c)| { + let root = r.as_ref().map(|r| r.as_slice()); + (k.as_slice(), root, core::mem::take(c)) + }), + ), state_version, - ); - txs.consolidate(parent_txs); - - (root, txs) + ) } /// Register stats from overlay of state machine. @@ -345,8 +348,7 @@ /// Commit given transaction to storage. fn commit( &self, - _: H::Out, - _: BackendTransaction, + _: BackendTransaction, _: StorageCollection, _: ChildStorageCollection, ) -> Result<(), Self::Error> { @@ -384,12 +386,11 @@ /// Something that can be converted into a [`TrieBackend`].
#[cfg(feature = "std")] -pub trait AsTrieBackend> { - /// Type of trie backend storage. - type TrieBackendStorage: TrieBackendStorage; - +pub trait AsTrieBackend> { + /// Return the type as [`TrieBackend`]. + fn as_trie_backend(&self) -> &TrieBackend; /// Return the type as [`TrieBackend`]. - fn as_trie_backend(&self) -> &TrieBackend; + fn as_trie_backend_mut(&mut self) -> &mut TrieBackend; } /// Wrapper to create a [`RuntimeCode`] from a type that implements [`Backend`]. diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index ace88aee2628..074bb58540bb 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -17,9 +17,8 @@ //! Basic implementation for Externalities. -use crate::{Backend, OverlayedChanges, StorageKey, StorageValue}; +use crate::{Backend, DBLocation, OverlayedChanges, StorageKey, StorageValue}; use codec::Encode; -use hash_db::Hasher; use log::warn; use sp_core::{ storage::{ @@ -35,6 +34,7 @@ use std::{ collections::BTreeMap, iter::FromIterator, }; +use trie_db::node_db::Hasher; /// Simple Map-based Externalities impl. #[derive(Debug)] @@ -258,7 +258,7 @@ impl Externalities for BasicExternalities { // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. - let empty_hash = empty_child_trie_root::>(); + let empty_hash = empty_child_trie_root::>(); for child_info in self.overlay.children().map(|d| d.1.clone()).collect::>() { let child_root = self.child_storage_root(&child_info, state_version); if empty_hash[..] == child_root[..] { @@ -269,8 +269,10 @@ impl Externalities for BasicExternalities { } match state_version { - StateVersion::V0 => LayoutV0::::trie_root(top).as_ref().into(), - StateVersion::V1 => LayoutV1::::trie_root(top).as_ref().into(), + StateVersion::V0 => + LayoutV0::::trie_root(top).as_ref().into(), + StateVersion::V1 => + LayoutV1::::trie_root(top).as_ref().into(), } } @@ -285,8 +287,9 @@ impl Externalities for BasicExternalities { crate::in_memory_backend::new_in_mem::() .child_storage_root(&child_info, delta, state_version) .0 + .root_hash() } else { - empty_child_trie_root::>() + empty_child_trie_root::>() } .encode() } diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 11df46f2a4a3..5326577a73ea 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -23,13 +23,13 @@ use crate::{ backend::Backend, IndexOperation, IterArgs, OverlayedChanges, StorageKey, StorageValue, }; use codec::{Encode, EncodeAppend}; -use hash_db::Hasher; #[cfg(feature = "std")] use sp_core::hexdisplay::HexDisplay; use sp_core::storage::{ well_known_keys::is_child_storage_key, ChildInfo, StateVersion, TrackedStorageKey, }; use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults}; +use trie_db::node_db::Hasher; use crate::{log_error, trace, warn}; use sp_std::{ @@ -617,7 +617,6 @@ where .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend .commit( - changes.transaction_storage_root, changes.transaction, changes.main_storage_changes, changes.child_storage_changes, diff --git a/substrate/primitives/state-machine/src/in_memory_backend.rs b/substrate/primitives/state-machine/src/in_memory_backend.rs index 06fe6d4162a7..6685b4e06468 100644 --- a/substrate/primitives/state-machine/src/in_memory_backend.rs +++ 
b/substrate/primitives/state-machine/src/in_memory_backend.rs @@ -18,26 +18,29 @@ //! State machine in memory backend. use crate::{ - backend::Backend, trie_backend::TrieBackend, StorageCollection, StorageKey, StorageValue, - TrieBackendBuilder, + backend::{Backend, BackendTransaction}, + trie_backend::TrieBackend, + StorageCollection, StorageKey, StorageValue, TrieBackendBuilder, }; use codec::Codec; -use hash_db::Hasher; use sp_core::storage::{ChildInfo, StateVersion, Storage}; use sp_trie::{empty_trie_root, LayoutV1, PrefixedMemoryDB}; use std::collections::{BTreeMap, HashMap}; +use trie_db::node_db::Hasher; /// Create a new empty instance of in-memory backend. -pub fn new_in_mem() -> TrieBackend, H> +pub fn new_in_mem() -> TrieBackend where - H: Hasher, + H: Hasher + 'static, H::Out: Codec + Ord, { + let db = PrefixedMemoryDB::default(); + // let db = MemoryDB::default(); // V1 is same as V0 for an empty trie. - TrieBackendBuilder::new(Default::default(), empty_trie_root::>()).build() + TrieBackendBuilder::new(Box::new(db), empty_trie_root::>()).build() } -impl TrieBackend, H> +impl TrieBackend where H::Out: Codec + Ord, { @@ -46,10 +49,13 @@ &self, changes: T, state_version: StateVersion, - ) -> Self { - let mut clone = self.clone(); - clone.insert(changes, state_version); - clone + ) -> Option { + if let Some(mut clone) = self.clone_in_mem() { + clone.insert(changes, state_version); + Some(clone) + } else { + None + } } /// Insert values into backend trie. @@ -59,7 +65,7 @@ state_version: StateVersion, ) { let (top, child) = changes.into_iter().partition::, _>(|v| v.0.is_none()); - let (root, transaction) = self.full_storage_root( + let transaction = self.full_storage_root( top.iter().flat_map(|(_, v)| v).map(|(k, v)| (&k[..], v.as_deref())), child.iter().filter_map(|v| { v.0.as_ref().map(|c| (c, v.1.iter().map(|(k, v)| (&k[..], v.as_deref())))) @@ -67,51 +73,54 @@ state_version, ); - self.apply_transaction(root, transaction); - } - - /// Merge trie nodes into this backend. - pub fn update_backend(&self, root: H::Out, changes: PrefixedMemoryDB) -> Self { - let mut clone = self.backend_storage().clone(); - clone.consolidate(changes); - TrieBackendBuilder::new(clone, root).build() + self.apply_transaction(transaction); } /// Apply the given transaction to this backend and set the root accordingly. - pub fn apply_transaction(&mut self, root: H::Out, transaction: PrefixedMemoryDB) { - let mut storage = core::mem::take(self).into_storage(); - - storage.consolidate(transaction); - *self = TrieBackendBuilder::new(storage, root).build(); + pub fn apply_transaction(&mut self, transaction: BackendTransaction) { + if let Some(mut mdb) = self.backend_storage_mut().as_mem_db_mut() { + let root = transaction.apply_to(&mut mdb); + self.set_root(root); + } else if let Some(mut mdb) = self.backend_storage_mut().as_prefixed_mem_db_mut() { + let root = transaction.apply_to(&mut mdb); + self.set_root(root); + } else { + unreachable!() + } } /// Compare with another in-memory backend. pub fn eq(&self, other: &Self) -> bool { self.root() == other.root() } -} -impl Clone for TrieBackend, H> -where - H::Out: Codec + Ord, -{ - fn clone(&self) -> Self { - TrieBackendBuilder::new(self.backend_storage().clone(), *self.root()).build() + /// Clone this backend if it is backed by in-memory storage. + /// Note that this will clone the underlying storage.
+ pub fn clone_in_mem(&self) -> Option { + if let Some(db) = self.backend_storage().as_mem_db() { + Some(TrieBackendBuilder::new(Box::new(db.clone()), *self.root()).build()) + } else if let Some(db) = self.backend_storage().as_prefixed_mem_db() { + Some(TrieBackendBuilder::new(Box::new(db.clone()), *self.root()).build()) + } else { + None + } } } -impl Default for TrieBackend, H> +impl TrieBackend where - H: Hasher, + H: Hasher + 'static, H::Out: Codec + Ord, { - fn default() -> Self { + /// New in memory backend. + pub fn default() -> Self { new_in_mem() } } -impl From<(HashMap, BTreeMap>, StateVersion)> - for TrieBackend, H> +impl + From<(HashMap, BTreeMap>, StateVersion)> + for TrieBackend where H::Out: Codec + Ord, { @@ -132,7 +141,7 @@ where } } -impl From<(Storage, StateVersion)> for TrieBackend, H> +impl From<(Storage, StateVersion)> for TrieBackend where H::Out: Codec + Ord, { @@ -147,8 +156,8 @@ where } } -impl From<(BTreeMap, StateVersion)> - for TrieBackend, H> +impl From<(BTreeMap, StateVersion)> + for TrieBackend where H::Out: Codec + Ord, { @@ -159,8 +168,8 @@ where } } -impl From<(Vec<(Option, StorageCollection)>, StateVersion)> - for TrieBackend, H> +impl From<(Vec<(Option, StorageCollection)>, StateVersion)> + for TrieBackend where H::Out: Codec + Ord, { @@ -195,10 +204,12 @@ mod tests { let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; - let storage = storage.update( - vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], - state_version, - ); + let storage = storage + .update( + vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], + state_version, + ) + .unwrap(); let trie_backend = storage.as_trie_backend(); assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); let storage_key = child_info.prefixed_storage_key(); diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 5909a30a814c..2adccf851dcd 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -20,6 +20,8 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + pub mod backend; #[cfg(feature = "std")] mod basic; @@ -118,14 +120,16 @@ pub type DefaultError = String; pub struct DefaultError; #[cfg(not(feature = "std"))] -impl sp_std::fmt::Display for DefaultError { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { +impl core::fmt::Display for DefaultError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "DefaultError") } } pub use crate::{ - backend::{Backend, BackendTransaction, IterArgs, KeysIter, PairsIter, StorageIterator}, + backend::{ + Backend, BackendTransaction, DBLocation, IterArgs, KeysIter, PairsIter, StorageIterator, + }, error::{Error, ExecutionError}, ext::Ext, overlayed_changes::{ @@ -135,7 +139,6 @@ pub use crate::{ }, stats::{StateMachineStats, UsageInfo, UsageUnit}, trie_backend::{TrieBackend, TrieBackendBuilder}, - trie_backend_essence::{Storage, TrieBackendStorage}, }; #[cfg(feature = "std")] @@ -145,12 +148,13 @@ mod std_reexport { in_memory_backend::new_in_mem, read_only::{InspectState, ReadOnlyExternalities}, testing::TestExternalities, - trie_backend::create_proof_check_backend, + trie_backend::{create_proof_check_backend, AsDB}, }; pub use sp_trie::{ trie_types::{TrieDBMutV0, TrieDBMutV1}, - CompactProof, DBValue, LayoutV0, LayoutV1, MemoryDB, 
StorageProof, TrieMut, + CompactProof, DBValue, LayoutV0, LayoutV1, MemoryDB, PrefixedMemoryDB, StorageProof, }; + pub use trie_db::node_db::NodeDB; } #[cfg(feature = "std")] @@ -159,7 +163,6 @@ mod execution { use super::*; use codec::Codec; - use hash_db::Hasher; use smallvec::SmallVec; use sp_core::{ hexdisplay::HexDisplay, @@ -167,8 +170,8 @@ mod execution { traits::{CallContext, CodeExecutor, RuntimeCode}, }; use sp_externalities::Extensions; - use sp_trie::PrefixedMemoryDB; use std::collections::{HashMap, HashSet}; + use trie_db::node_db::Hasher; pub(crate) type CallResult = Result, E>; @@ -176,7 +179,7 @@ mod execution { pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; /// Trie backend with in-memory storage. - pub type InMemoryBackend = TrieBackend, H>; + pub type InMemoryBackend = TrieBackend; /// Storage backend trust level. #[derive(Debug, Clone)] @@ -321,8 +324,8 @@ mod execution { H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, { - let trie_backend = backend.as_trie_backend(); - prove_execution_on_trie_backend::<_, _, _>( + let trie_backend = backend.as_trie_backend_mut(); + prove_execution_on_trie_backend::<_, _>( trie_backend, overlay, exec, @@ -342,8 +345,8 @@ mod execution { /// /// Note: changes to code will be in place if this call is made again. For running partial /// blocks (e.g. a transaction at a time), ensure a different method is used. - pub fn prove_execution_on_trie_backend( - trie_backend: &TrieBackend, + pub fn prove_execution_on_trie_backend( + trie_backend: &TrieBackend, overlay: &mut OverlayedChanges, exec: &Exec, method: &str, @@ -352,16 +355,14 @@ mod execution { extensions: &mut Extensions, ) -> Result<(Vec, StorageProof), Box> where - S: trie_backend_essence::TrieBackendStorage, H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + 'static + Clone, { - let proving_backend = - TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); + let proving_backend = trie_backend.with_temp_recorder(Default::default()); let result = StateMachine::<_, H, Exec>::new( - &proving_backend, + &*proving_backend, overlay, exec, method, @@ -407,7 +408,7 @@ mod execution { /// Check execution proof on proving backend, generated by `prove_execution` call. pub fn execution_proof_check_on_trie_backend( - trie_backend: &TrieBackend, H>, + trie_backend: &TrieBackend, overlay: &mut OverlayedChanges, exec: &Exec, method: &str, @@ -433,7 +434,7 @@ mod execution { } /// Generate storage read proof. - pub fn prove_read(backend: B, keys: I) -> Result> + pub fn prove_read(mut backend: B, keys: I) -> Result> where B: AsTrieBackend, H: Hasher, @@ -441,7 +442,7 @@ mod execution { I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend(); + let trie_backend = backend.as_trie_backend_mut(); prove_read_on_trie_backend(trie_backend, keys) } @@ -564,7 +565,7 @@ mod execution { /// If a key different than `start_at` is a child trie root, /// the child trie content will be included in the proof. pub fn prove_range_read_with_child_with_size( - backend: B, + mut backend: B, size_limit: usize, start_at: &[Vec], ) -> Result<(StorageProof, u32), Box> @@ -573,20 +574,19 @@ mod execution { H: Hasher, H::Out: Ord + Codec, { - let trie_backend = backend.as_trie_backend(); + let trie_backend = backend.as_trie_backend_mut(); prove_range_read_with_child_with_size_on_trie_backend(trie_backend, size_limit, start_at) } /// Generate range storage read proof, with child tries /// content. 
/// See `prove_range_read_with_child_with_size`. - pub fn prove_range_read_with_child_with_size_on_trie_backend( - trie_backend: &TrieBackend, + pub fn prove_range_read_with_child_with_size_on_trie_backend( + trie_backend: &TrieBackend, size_limit: usize, start_at: &[Vec], ) -> Result<(StorageProof, u32), Box> where - S: trie_backend_essence::TrieBackendStorage, H: Hasher, H::Out: Ord + Codec, { @@ -595,8 +595,7 @@ mod execution { } let recorder = sp_trie::recorder::Recorder::default(); - let proving_backend = - TrieBackendBuilder::wrap(trie_backend).with_recorder(recorder.clone()).build(); + let proving_backend = trie_backend.with_temp_recorder(recorder.clone()); let mut count = 0; let mut child_roots = HashSet::new(); @@ -686,7 +685,7 @@ mod execution { /// Generate range storage read proof. pub fn prove_range_read_with_size( - backend: B, + mut backend: B, child_info: Option<&ChildInfo>, prefix: Option<&[u8]>, size_limit: usize, @@ -697,7 +696,7 @@ mod execution { H: Hasher, H::Out: Ord + Codec, { - let trie_backend = backend.as_trie_backend(); + let trie_backend = backend.as_trie_backend_mut(); prove_range_read_with_size_on_trie_backend( trie_backend, child_info, @@ -708,21 +707,19 @@ mod execution { } /// Generate range storage read proof on an existing trie backend. - pub fn prove_range_read_with_size_on_trie_backend( - trie_backend: &TrieBackend, + pub fn prove_range_read_with_size_on_trie_backend( + trie_backend: &mut TrieBackend, child_info: Option<&ChildInfo>, prefix: Option<&[u8]>, size_limit: usize, start_at: Option<&[u8]>, ) -> Result<(StorageProof, u32), Box> where - S: trie_backend_essence::TrieBackendStorage, H: Hasher, H::Out: Ord + Codec, { let recorder = sp_trie::recorder::Recorder::default(); - let proving_backend = - TrieBackendBuilder::wrap(trie_backend).with_recorder(recorder.clone()).build(); + let proving_backend = trie_backend.with_temp_recorder(recorder.clone()); let mut count = 0; let iter = proving_backend // NOTE: Even though the loop below doesn't use these values @@ -753,7 +750,7 @@ mod execution { /// Generate child storage read proof. pub fn prove_child_read( - backend: B, + mut backend: B, child_info: &ChildInfo, keys: I, ) -> Result> @@ -764,50 +761,47 @@ mod execution { I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend(); + let trie_backend = backend.as_trie_backend_mut(); prove_child_read_on_trie_backend(trie_backend, child_info, keys) } /// Generate storage read proof on pre-created trie backend. - pub fn prove_read_on_trie_backend( - trie_backend: &TrieBackend, + pub fn prove_read_on_trie_backend( + trie_backend: &mut TrieBackend, keys: I, ) -> Result> where - S: trie_backend_essence::TrieBackendStorage, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = - TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); + let proving_backend = trie_backend.with_temp_recorder(Default::default()); for key in keys.into_iter() { proving_backend .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend + let result = proving_backend .extract_proof() - .expect("A recorder was set and thus, a storage proof can be extracted; qed")) + .expect("A recorder was set and thus, a storage proof can be extracted; qed"); + Ok(result) } /// Generate storage read proof on pre-created trie backend. 
- pub fn prove_child_read_on_trie_backend( - trie_backend: &TrieBackend, + pub fn prove_child_read_on_trie_backend( + trie_backend: &mut TrieBackend, child_info: &ChildInfo, keys: I, ) -> Result> where - S: trie_backend_essence::TrieBackendStorage, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = - TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); + let proving_backend = trie_backend.with_temp_recorder(Default::default()); for key in keys.into_iter() { proving_backend .child_storage(child_info, key.as_ref()) @@ -909,7 +903,7 @@ mod execution { /// Check storage read proof on pre-created proving backend. pub fn read_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, + proving_backend: &TrieBackend, key: &[u8], ) -> Result>, Box> where @@ -921,7 +915,7 @@ mod execution { /// Check child storage read proof on pre-created proving backend. pub fn read_child_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, + proving_backend: &TrieBackend, child_info: &ChildInfo, key: &[u8], ) -> Result>, Box> @@ -939,7 +933,7 @@ mod execution { /// Returns a vector with the read `key => value` pairs and a `bool` that is set to `true` when /// all `key => value` pairs could be read and no more are left. pub fn read_range_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, + proving_backend: &TrieBackend, child_info: Option<&ChildInfo>, prefix: Option<&[u8]>, count: Option, @@ -975,7 +969,7 @@ mod execution { /// /// See `read_range_proof_check_with_child`. pub fn read_range_proof_check_with_child_on_proving_backend( - proving_backend: &TrieBackend, H>, + proving_backend: &TrieBackend, start_at: &[Vec], ) -> Result<(KeyValueStates, usize), Box> where @@ -1094,13 +1088,9 @@ mod tests { map, storage::{ChildInfo, StateVersion}, traits::{CallContext, CodeExecutor, Externalities, RuntimeCode}, - H256, }; use sp_runtime::traits::BlakeTwo256; - use sp_trie::{ - trie_types::{TrieDBMutBuilderV0, TrieDBMutBuilderV1}, - KeySpacedDBMut, PrefixedMemoryDB, - }; + use sp_trie::trie_types::{TrieDBMutBuilderV0, TrieDBMutBuilderV1}; use std::collections::{BTreeMap, HashMap}; #[derive(Clone)] @@ -1216,7 +1206,8 @@ mod tests { // fetch execution proof from 'remote' full node let mut remote_backend = trie_backend::tests::test_trie(state_version, None, None); - let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; + let remote_root = + remote_backend.storage_root(std::iter::empty(), state_version).root_hash(); let (remote_result, remote_proof) = prove_execution( &mut remote_backend, &mut Default::default(), @@ -1526,7 +1517,8 @@ mod tests { let missing_child_info = &missing_child_info; // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(state_version, None, None); - let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; + let remote_root = + remote_backend.storage_root(std::iter::empty(), state_version).root_hash(); let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); // check proof locally @@ -1543,7 +1535,8 @@ mod tests { assert_eq!(local_result2, false); // on child trie let remote_backend = trie_backend::tests::test_trie(state_version, None, None); - let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; + let remote_root = + remote_backend.storage_root(std::iter::empty(), 
state_version).root_hash(); let remote_proof = prove_child_read(remote_backend, child_info, &[b"value3"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); let local_result1 = read_child_proof_check::( @@ -1610,7 +1603,7 @@ mod tests { let trie: InMemoryBackend = (storage.clone(), StateVersion::default()).into(); let trie_root = *trie.root(); - let backend = TrieBackendBuilder::wrap(&trie).with_recorder(Default::default()).build(); + let backend = trie.with_temp_recorder(Default::default()); let mut queries = Vec::new(); for c in 0..(5 + nb_child_trie / 2) { // random existing query @@ -1673,7 +1666,8 @@ mod tests { fn prove_read_with_size_limit_works() { let state_version = StateVersion::V0; let remote_backend = trie_backend::tests::test_trie(state_version, None, None); - let remote_root = remote_backend.storage_root(::std::iter::empty(), state_version).0; + let remote_root = + remote_backend.storage_root(::std::iter::empty(), state_version).root_hash(); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); // Always contains at least some nodes. @@ -1721,19 +1715,18 @@ mod tests { #[test] fn prove_read_with_size_limit_proof_size() { - let mut root = H256::default(); - let mut mdb = PrefixedMemoryDB::::default(); - { - let mut mdb = KeySpacedDBMut::new(&mut mdb, b""); - let mut trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); + let mut mdb = MemoryDB::::default(); + let root = { + let mut trie = TrieDBMutBuilderV1::new(&mdb).build(); trie.insert(b"value1", &[123; 1]).unwrap(); trie.insert(b"value2", &[123; 10]).unwrap(); trie.insert(b"value3", &[123; 100]).unwrap(); trie.insert(b"value4", &[123; 1000]).unwrap(); - } + trie.commit().apply_to(&mut mdb) + }; - let remote_backend: TrieBackend, BlakeTwo256> = - TrieBackendBuilder::new(mdb, root) + let remote_backend: TrieBackend = + TrieBackendBuilder::<_>::new(Box::new(mdb), root) .with_optional_cache(None) .with_optional_recorder(None) .build(); @@ -1750,18 +1743,20 @@ mod tests { let mut state_version = StateVersion::V0; let (mut mdb, mut root) = trie_backend::tests::test_db(state_version); { - let mut trie = TrieDBMutBuilderV0::from_existing(&mut mdb, &mut root).build(); + let mut trie = TrieDBMutBuilderV0::from_existing(&mut mdb, root).build(); trie.insert(b"foo", vec![1u8; 1_000].as_slice()) // big inner hash .expect("insert failed"); trie.insert(b"foo2", vec![3u8; 16].as_slice()) // no inner hash .expect("insert failed"); trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash .expect("insert failed"); + root = trie.commit().apply_to(&mut mdb); } let check_proof = |mdb, root, state_version| -> StorageProof { - let remote_backend = TrieBackendBuilder::new(mdb, root).build(); - let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; + let remote_backend = TrieBackendBuilder::new(Box::new(mdb), root).build(); + let remote_root = + remote_backend.storage_root(std::iter::empty(), state_version).root_hash(); let remote_proof = prove_read(remote_backend, &[b"foo222"]).unwrap(); // check proof locally let local_result1 = @@ -1784,12 +1779,13 @@ mod tests { // do switch state_version = StateVersion::V1; { - let mut trie = TrieDBMutBuilderV1::from_existing(&mut mdb, &mut root).build(); + let mut trie = TrieDBMutBuilderV1::from_existing(&mdb, root).build(); trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash .expect("insert failed"); // update with same value do change trie.insert(b"foo", vec![1u8; 1000].as_slice()) // inner 
hash .expect("insert failed"); + root = trie.commit().apply_to(&mut mdb); } let root3 = root; assert!(root1 != root3); @@ -1804,7 +1800,8 @@ mod tests { fn prove_range_with_child_works() { let state_version = StateVersion::V0; let remote_backend = trie_backend::tests::test_trie(state_version, None, None); - let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; + let remote_root = + remote_backend.storage_root(std::iter::empty(), state_version).root_hash(); let mut start_at = smallvec::SmallVec::<[Vec; 2]>::new(); let trie_backend = remote_backend.as_trie_backend(); let max_iter = 1000; @@ -1855,7 +1852,7 @@ mod tests { let child_info3 = ChildInfo::new_default(b"sub"); let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let long_vec: Vec = (0..1024usize).map(|_| 8u8).collect(); - let (remote_root, transaction) = remote_backend.full_storage_root( + let transaction = remote_backend.full_storage_root( std::iter::empty(), vec![ ( @@ -1885,9 +1882,9 @@ mod tests { .into_iter(), state_version, ); - let mut remote_storage = remote_backend.backend_storage().clone(); - remote_storage.consolidate(transaction); - let remote_backend = TrieBackendBuilder::new(remote_storage, remote_root).build(); + let mut remote_storage = remote_backend.backend_storage().as_mem_db().unwrap().clone(); + let remote_root = transaction.apply_to(&mut remote_storage); + let remote_backend = TrieBackendBuilder::new(Box::new(remote_storage), remote_root).build(); let remote_proof = prove_child_read(remote_backend, &child_info1, &[b"key1"]).unwrap(); let size = remote_proof.encoded_size(); let remote_proof = test_compact(remote_proof, &remote_root); @@ -1912,7 +1909,7 @@ mod tests { use crate::trie_backend::tests::test_trie; let mut overlay = OverlayedChanges::default(); - let mut transaction = { + let transaction = { let backend = test_trie(state_version, None, None); let mut ext = Ext::new(&mut overlay, &backend, None); ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); @@ -1921,7 +1918,9 @@ mod tests { overlay.drain_storage_changes(&backend, state_version).unwrap().transaction }; let mut duplicate = false; - for (k, (value, rc)) in transaction.drain().iter() { + let mut memdb = PrefixedMemoryDB::::default(); + transaction.apply_to(&mut memdb); + for (k, (value, rc)) in memdb.drain().iter() { // look for a key inserted twice: transaction rc is 2 if *rc == 2 { duplicate = true; diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index 626cf6c3cafe..8559ab90779e 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -22,8 +22,10 @@ mod offchain; use self::changeset::OverlayedChangeSet; use crate::{backend::Backend, stats::StateMachineStats, BackendTransaction, DefaultError}; +#[cfg(not(feature = "std"))] +use alloc::collections::btree_map::BTreeMap as Map; +use alloc::{collections::btree_set::BTreeSet, vec::Vec}; use codec::{Decode, Encode}; -use hash_db::Hasher; pub use offchain::OffchainOverlayedChanges; use sp_core::{ offchain::OffchainOverlayedChange, @@ -31,10 +33,7 @@ use sp_core::{ }; #[cfg(feature = "std")] use sp_externalities::{Extension, Extensions}; -#[cfg(not(feature = "std"))] -use sp_std::collections::btree_map::BTreeMap as Map; -use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; -use sp_trie::{empty_child_trie_root, LayoutV1}; +use 
sp_trie::{empty_child_trie_root, DBLocation, LayoutV1}; #[cfg(feature = "std")] use std::collections::{hash_map::Entry as MapEntry, HashMap as Map}; #[cfg(feature = "std")] @@ -42,6 +41,7 @@ use std::{ any::{Any, TypeId}, boxed::Box, }; +use trie_db::node_db::Hasher; pub use self::changeset::{AlreadyInRuntime, NoOpenTransaction, NotInRuntime, OverlayedValue}; @@ -131,7 +131,9 @@ impl Clone for OverlayedChanges { transaction_index_ops: self.transaction_index_ops.clone(), collect_extrinsics: self.collect_extrinsics, stats: self.stats.clone(), - storage_transaction_cache: self.storage_transaction_cache.clone(), + storage_transaction_cache: None, /* TODO try tests with old code (very important). + * storage_transaction_cache: + * self.storage_transaction_cache.clone(), */ } } } @@ -188,9 +190,7 @@ pub struct StorageChanges { /// [`main_storage_changes`](StorageChanges::main_storage_changes) and from /// [`child_storage_changes`](StorageChanges::child_storage_changes). /// [`offchain_storage_changes`](StorageChanges::offchain_storage_changes). - pub transaction: BackendTransaction, - /// The storage root after applying the transaction. - pub transaction_storage_root: H::Out, + pub transaction: BackendTransaction, /// Changes to the transaction index, #[cfg(feature = "std")] pub transaction_index_changes: Vec, @@ -205,8 +205,7 @@ impl StorageChanges { StorageCollection, ChildStorageCollection, OffchainChangesCollection, - BackendTransaction, - H::Out, + BackendTransaction, Vec, ) { ( @@ -214,7 +213,6 @@ impl StorageChanges { self.child_storage_changes, self.offchain_storage_changes, self.transaction, - self.transaction_storage_root, self.transaction_index_changes, ) } @@ -226,8 +224,7 @@ impl Default for StorageChanges { main_storage_changes: Default::default(), child_storage_changes: Default::default(), offchain_storage_changes: Default::default(), - transaction: Default::default(), - transaction_storage_root: Default::default(), + transaction: BackendTransaction::unchanged(Default::default()), #[cfg(feature = "std")] transaction_index_changes: Default::default(), } @@ -239,35 +236,34 @@ impl Default for StorageChanges { /// storage. So, we cache them to not require a recomputation of those transactions. struct StorageTransactionCache { /// Contains the changes for the main and the child storages as one transaction. - transaction: BackendTransaction, - /// The storage root after applying the transaction. 
-	transaction_storage_root: H::Out,
+	pub(crate) transaction: BackendTransaction<H>,
 }
 
 impl<H: Hasher> StorageTransactionCache<H> {
-	fn into_inner(self) -> (BackendTransaction<H>, H::Out) {
-		(self.transaction, self.transaction_storage_root)
+	fn into_inner(self) -> BackendTransaction<H> {
+		self.transaction
 	}
 }
 
+/*
 impl<H: Hasher> Clone for StorageTransactionCache<H> {
 	fn clone(&self) -> Self {
 		Self {
 			transaction: self.transaction.clone(),
-			transaction_storage_root: self.transaction_storage_root,
 		}
 	}
 }
+*/
 
 impl<H: Hasher> sp_std::fmt::Debug for StorageTransactionCache<H> {
 	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
 		let mut debug = f.debug_struct("StorageTransactionCache");
 
 		#[cfg(feature = "std")]
-		debug.field("transaction_storage_root", &self.transaction_storage_root);
+		debug.field("transaction_storage_root", &self.transaction.root_hash());
 
 		#[cfg(not(feature = "std"))]
-		debug.field("transaction_storage_root", &self.transaction_storage_root.as_ref());
+		debug.field("transaction_storage_root", &self.transaction.root_hash().as_ref());
 
 		debug.finish()
 	}
@@ -560,7 +556,7 @@ impl<H: Hasher> OverlayedChanges<H> {
 	where
 		H::Out: Ord + Encode + 'static,
 	{
-		let (transaction, transaction_storage_root) = match self.storage_transaction_cache.take() {
+		let transaction = match self.storage_transaction_cache.take() {
 			Some(cache) => cache.into_inner(),
 			// If the transaction does not exist, we generate it.
 			None => {
@@ -590,7 +586,6 @@ impl<H: Hasher> OverlayedChanges<H> {
 				.collect(),
 			offchain_storage_changes,
 			transaction,
-			transaction_storage_root,
 			#[cfg(feature = "std")]
 			transaction_index_changes,
 		})
@@ -629,7 +624,7 @@ impl<H: Hasher> OverlayedChanges<H> {
 		H::Out: Ord + Encode,
 	{
 		if let Some(cache) = &self.storage_transaction_cache {
-			return (cache.transaction_storage_root, true)
+			return (cache.transaction.root_hash(), true)
 		}
 
 		let delta = self.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..])));
 			(info, changes.map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))))
 		});
 
-		let (root, transaction) = backend.full_storage_root(delta, child_delta, state_version);
+		let transaction = backend.full_storage_root(delta, child_delta, state_version);
+
+		let root = transaction.root_hash();
 
-		self.storage_transaction_cache =
-			Some(StorageTransactionCache { transaction, transaction_storage_root: root });
+		self.storage_transaction_cache = Some(StorageTransactionCache { transaction });
 
 		(root, false)
 	}
@@ -670,19 +666,20 @@ impl<H: Hasher> OverlayedChanges<H> {
 				.flatten()
 				.and_then(|k| Decode::decode(&mut &k[..]).ok())
 				// V1 is equivalent to V0 on empty root.
-				.unwrap_or_else(empty_child_trie_root::<LayoutV1<H>>);
+				.unwrap_or_else(empty_child_trie_root::<LayoutV1<H, DBLocation>>);
 			return Ok((root, true))
 		}
 
-		let root = if let Some((changes, info)) = self.child_changes(storage_key) {
+		let commit = if let Some((changes, info)) = self.child_changes(storage_key) {
 			let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref)));
 			Some(backend.child_storage_root(info, delta, state_version))
 		} else {
 			None
 		};
 
-		let root = if let Some((root, is_empty, _)) = root {
+		let root = if let Some((commit, is_empty)) = commit {
+			let root = commit.root_hash();
 			// We store update in the overlay in order to be able to use
 			// 'self.storage_transaction' cache. This is brittle as it rely on Ext only querying
 			// the trie backend for storage root.
@@ -699,7 +696,7 @@ impl<H: Hasher> OverlayedChanges<H> {
 				.storage(prefixed_storage_key.as_slice())?
 				.and_then(|k| Decode::decode(&mut &k[..]).ok())
 				// V1 is equivalent to V0 on empty root.
- .unwrap_or_else(empty_child_trie_root::>); + .unwrap_or_else(empty_child_trie_root::>); root }; diff --git a/substrate/primitives/state-machine/src/read_only.rs b/substrate/primitives/state-machine/src/read_only.rs index 2056bf986635..b4dd179977dd 100644 --- a/substrate/primitives/state-machine/src/read_only.rs +++ b/substrate/primitives/state-machine/src/read_only.rs @@ -19,7 +19,6 @@ use crate::{Backend, StorageKey, StorageValue}; use codec::Encode; -use hash_db::Hasher; use sp_core::{ storage::{ChildInfo, StateVersion, TrackedStorageKey}, traits::Externalities, @@ -29,6 +28,7 @@ use std::{ any::{Any, TypeId}, marker::PhantomData, }; +use trie_db::node_db::Hasher; /// Trait for inspecting state in any backend. /// diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs index 0eb7b6d1118f..121672da4ea0 100644 --- a/substrate/primitives/state-machine/src/testing.rs +++ b/substrate/primitives/state-machine/src/testing.rs @@ -27,7 +27,6 @@ use crate::{ TrieBackendBuilder, }; -use hash_db::{HashDB, Hasher}; use sp_core::{ offchain::testing::TestPersistentOffchainDB, storage::{ @@ -37,6 +36,7 @@ use sp_core::{ }; use sp_externalities::{Extension, ExtensionStore, Extensions}; use sp_trie::{PrefixedMemoryDB, StorageProof}; +use trie_db::node_db::Hasher; /// Simple HashMap-based Externalities impl. pub struct TestExternalities @@ -182,7 +182,7 @@ where } Self { - backend: TrieBackendBuilder::new(backend, storage_root).build(), + backend: TrieBackendBuilder::new(Box::new(backend), storage_root).build(), overlay: Default::default(), offchain_db: Default::default(), extensions: Default::default(), @@ -193,23 +193,24 @@ where /// Drains the underlying raw storage key/values and returns the root hash. /// /// Useful for backing up the storage in a format that can be quickly re-loaded. + /// + /// Note: This DB will be inoperable after this call. pub fn into_raw_snapshot(mut self) -> (Vec<(Vec, (Vec, i32))>, H::Out) { - let raw_key_values = self - .backend - .backend_storage_mut() - .drain() - .into_iter() - .filter(|(_, (_, r))| *r > 0) - .collect::, (Vec, i32))>>(); + if let Some(mdb) = self.backend.backend_storage_mut().as_prefixed_mem_db_mut() { + let raw_key_values = + mdb.drain().into_iter().collect::, (Vec, i32))>>(); - (raw_key_values, *self.backend.root()) + (raw_key_values, *self.backend.root()) + } else { + Default::default() + } } /// Return a new backend with all pending changes. /// /// In contrast to [`commit_all`](Self::commit_all) this will not panic if there are open /// transactions. - pub fn as_backend(&self) -> InMemoryBackend { + pub fn as_backend(&self) -> Option> { let top: Vec<_> = self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())).collect(); let mut transaction = vec![(None, top)]; @@ -232,8 +233,7 @@ where pub fn commit_all(&mut self) -> Result<(), String> { let changes = self.overlay.drain_storage_changes(&self.backend, self.state_version)?; - self.backend - .apply_transaction(changes.transaction_storage_root, changes.transaction); + self.backend.apply_transaction(changes.transaction); Ok(()) } @@ -251,11 +251,9 @@ where /// This implementation will wipe the proof recorded in between calls. Consecutive calls will /// get their own proof from scratch. 
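// ---------------------------------------------------------------------
// Illustrative sketch, not from the patch: committing overlay changes now
// that `BackendTransaction` carries its own root and the separate
// `transaction_storage_root` field is gone. This mirrors `commit_all`
// above; `overlay`, `backend` and `state_version` are assumed in scope.
let changes = overlay.drain_storage_changes(&backend, state_version)?;
// The root travels with the transaction itself ...
let root = changes.transaction.root_hash();
// ... and applying the transaction moves the backend to that root.
backend.apply_transaction(changes.transaction);
assert_eq!(*backend.root(), root);
// ---------------------------------------------------------------------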
pub fn execute_and_prove(&mut self, execute: impl FnOnce() -> R) -> (R, StorageProof) { - let proving_backend = TrieBackendBuilder::wrap(&self.backend) - .with_recorder(Default::default()) - .build(); + let proving_backend = self.backend.with_temp_recorder(Default::default()); let mut proving_ext = - Ext::new(&mut self.overlay, &proving_backend, Some(&mut self.extensions)); + Ext::new(&mut self.overlay, &*proving_backend, Some(&mut self.extensions)); let outcome = sp_externalities::set_and_run_with_externalities(&mut proving_ext, execute); let proof = proving_backend.extract_proof().expect("Failed to extract storage proof"); @@ -300,7 +298,28 @@ where /// This doesn't test if they are in the same state, only if they contains the /// same data at this state fn eq(&self, other: &TestExternalities) -> bool { - self.as_backend().eq(&other.as_backend()) + let this_backend = self.as_backend(); + let other_backend = other.as_backend(); + match (this_backend, other_backend) { + (Some(this_backend), Some(other_backend)) => { + match ( + other_backend.backend_storage().as_mem_db(), + this_backend.backend_storage().as_mem_db(), + ) { + (Some(other), Some(this)) => return other == this, + _ => (), + } + match ( + other_backend.backend_storage().as_prefixed_mem_db(), + this_backend.backend_storage().as_prefixed_mem_db(), + ) { + (Some(other), Some(this)) => return other == this, + _ => (), + } + false + }, + _ => false, + } } } @@ -399,59 +418,7 @@ mod tests { assert_eq!(H256::from_slice(ext.storage_root(Default::default()).as_slice()), root); } - #[test] - fn raw_storage_drain_and_restore() { - // Create a TestExternalities with some data in it. - let mut original_ext = - TestExternalities::::from((Default::default(), Default::default())); - original_ext.insert(b"doe".to_vec(), b"reindeer".to_vec()); - original_ext.insert(b"dog".to_vec(), b"puppy".to_vec()); - original_ext.insert(b"dogglesworth".to_vec(), b"cat".to_vec()); - let child_info = ChildInfo::new_default(&b"test_child"[..]); - original_ext.insert_child(child_info.clone(), b"cattytown".to_vec(), b"is_dark".to_vec()); - original_ext.insert_child(child_info.clone(), b"doggytown".to_vec(), b"is_sunny".to_vec()); - - // Apply the backend to itself again to increase the ref count of all nodes. - original_ext.backend.apply_transaction( - *original_ext.backend.root(), - original_ext.backend.clone().into_storage(), - ); - - // Ensure all have the correct ref counrt - assert!(original_ext.backend.backend_storage().keys().values().all(|r| *r == 2)); - - // Drain the raw storage and root. - let root = *original_ext.backend.root(); - let (raw_storage, storage_root) = original_ext.into_raw_snapshot(); - - // Load the raw storage and root into a new TestExternalities. 
- let recovered_ext = TestExternalities::::from_raw_snapshot( - raw_storage, - storage_root, - Default::default(), - ); - - // Check the storage root is the same as the original - assert_eq!(root, *recovered_ext.backend.root()); - - // Check the original storage key/values were recovered correctly - assert_eq!(recovered_ext.backend.storage(b"doe").unwrap(), Some(b"reindeer".to_vec())); - assert_eq!(recovered_ext.backend.storage(b"dog").unwrap(), Some(b"puppy".to_vec())); - assert_eq!(recovered_ext.backend.storage(b"dogglesworth").unwrap(), Some(b"cat".to_vec())); - - // Check the original child storage key/values were recovered correctly - assert_eq!( - recovered_ext.backend.child_storage(&child_info, b"cattytown").unwrap(), - Some(b"is_dark".to_vec()) - ); - assert_eq!( - recovered_ext.backend.child_storage(&child_info, b"doggytown").unwrap(), - Some(b"is_sunny".to_vec()) - ); - - // Ensure all have the correct ref count after importing - assert!(recovered_ext.backend.backend_storage().keys().values().all(|r| *r == 2)); - } + // TODO restore raw_storage_drain_and_restore()?? #[test] fn set_and_retrieve_code() { @@ -508,7 +475,7 @@ mod tests { ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); } - let backend = ext.as_backend(); + let backend = ext.as_backend().unwrap(); ext.commit_all().unwrap(); assert!(ext.backend.eq(&backend), "Both backend should be equal."); diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index 7496463e6421..99a25d41be97 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -20,33 +20,35 @@ #[cfg(feature = "std")] use crate::backend::AsTrieBackend; use crate::{ - backend::{IterArgs, StorageIterator}, - trie_backend_essence::{RawIter, TrieBackendEssence, TrieBackendStorage}, + backend::{BackendTransaction, DBLocation, IterArgs, StorageIterator}, + trie_backend_essence::{RawIter, TrieBackendEssence}, Backend, StorageKey, StorageValue, }; +#[cfg(not(feature = "std"))] +use alloc::boxed::Box; use codec::Codec; -#[cfg(feature = "std")] -use hash_db::HashDB; -use hash_db::Hasher; use sp_core::storage::{ChildInfo, StateVersion}; #[cfg(feature = "std")] +use sp_trie::cache::{LocalTrieCache, TrieCache}; use sp_trie::{ - cache::{LocalTrieCache, TrieCache}, - MemoryDB, + ChildChangeset, DBValue, MemoryDB, MerkleValue, PrefixedMemoryDB, StorageProof, + TrieRecorderProvider, }; #[cfg(not(feature = "std"))] use sp_trie::{Error, NodeCodec}; -use sp_trie::{MerkleValue, PrefixedMemoryDB, StorageProof, TrieRecorderProvider}; -use trie_db::TrieCache as TrieCacheT; #[cfg(not(feature = "std"))] use trie_db::{node::NodeOwned, CachedValue}; +use trie_db::{ + node_db::{Hasher, NodeDB}, + TrieCache as TrieCacheT, +}; /// A provider of trie caches that are compatible with [`trie_db::TrieDB`]. pub trait TrieCacheProvider { /// Cache type that implements [`trie_db::TrieCache`]. - type Cache<'a>: TrieCacheT> + 'a + type Cache<'a>: TrieCacheT, DBLocation> + 'a where Self: 'a; @@ -71,9 +73,12 @@ pub trait TrieCacheProvider { fn merge<'a>(&'a self, other: Self::Cache<'a>, new_root: H::Out); } +// TODO pass DBLocation from backend (no use of having +// something u64 in structs for rocksdb or old paritydb). +// (for no_std it is ()). 
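// ---------------------------------------------------------------------
// Illustrative sketch, not from the patch: wiring a trie cache into a
// backend under the new `DBLocation`-parameterised provider. The generic
// arguments to `SharedTrieCache` follow the `SharedCache` alias in the
// tests further down and are an assumption here; `mdb` and `root` are an
// in-memory DB and a valid root for it.
let shared_cache = SharedTrieCache::<BlakeTwo256, DBLocation>::new(CacheSize::unlimited());
let backend =
	TrieBackendBuilder::new_with_cache(Box::new(mdb), root, shared_cache.local_cache()).build();
// ---------------------------------------------------------------------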
#[cfg(feature = "std")] -impl TrieCacheProvider for LocalTrieCache { - type Cache<'a> = TrieCache<'a, H> where H: 'a; +impl TrieCacheProvider for LocalTrieCache { + type Cache<'a> = TrieCache<'a, H, DBLocation> where H: 'a; fn as_trie_db_cache(&self, storage_root: H::Out) -> Self::Cache<'_> { self.as_trie_db_cache(storage_root) @@ -89,8 +94,8 @@ impl TrieCacheProvider for LocalTrieCache { } #[cfg(feature = "std")] -impl TrieCacheProvider for &LocalTrieCache { - type Cache<'a> = TrieCache<'a, H> where Self: 'a; +impl TrieCacheProvider for &LocalTrieCache { + type Cache<'a> = TrieCache<'a, H, DBLocation> where Self: 'a; fn as_trie_db_cache(&self, storage_root: H::Out) -> Self::Cache<'_> { (*self).as_trie_db_cache(storage_root) @@ -108,38 +113,44 @@ impl TrieCacheProvider for &LocalTrieCache { /// Cache provider that allows construction of a [`TrieBackend`] and satisfies the requirements, but /// can never be instantiated. #[cfg(not(feature = "std"))] -pub struct UnimplementedCacheProvider { +pub struct UnimplementedCacheProvider { // Not strictly necessary, but the H bound allows to use this as a drop-in // replacement for the `LocalTrieCache` in no-std contexts. - _phantom: core::marker::PhantomData, + _phantom: core::marker::PhantomData<(H, L)>, } #[cfg(not(feature = "std"))] -impl trie_db::TrieCache> for UnimplementedCacheProvider { - fn lookup_value_for_key(&mut self, _key: &[u8]) -> Option<&CachedValue> { +impl trie_db::TrieCache, L> for UnimplementedCacheProvider { + fn lookup_value_for_key(&mut self, _key: &[u8]) -> Option<&CachedValue> { unimplemented!() } - fn cache_value_for_key(&mut self, _key: &[u8], _value: CachedValue) { + fn cache_value_for_key(&mut self, _key: &[u8], _value: CachedValue) { unimplemented!() } fn get_or_insert_node( &mut self, _hash: H::Out, - _fetch_node: &mut dyn FnMut() -> trie_db::Result, H::Out, Error>, - ) -> trie_db::Result<&NodeOwned, H::Out, Error> { + _location: L, + _fetch_node: &mut dyn FnMut() + -> trie_db::Result, H::Out, Error>, + ) -> trie_db::Result<&NodeOwned, H::Out, Error> { unimplemented!() } - fn get_node(&mut self, _hash: &H::Out) -> Option<&NodeOwned> { + fn get_node(&mut self, _hash: &H::Out, _location: L) -> Option<&NodeOwned> { + unimplemented!() + } + + fn insert_new_node(&mut self, _hash: &H::Out) { unimplemented!() } } #[cfg(not(feature = "std"))] -impl TrieCacheProvider for UnimplementedCacheProvider { - type Cache<'a> = UnimplementedCacheProvider where H: 'a; +impl TrieCacheProvider for UnimplementedCacheProvider { + type Cache<'a> = UnimplementedCacheProvider where H: 'a, L: 'a; fn as_trie_db_cache(&self, _storage_root: ::Out) -> Self::Cache<'_> { unimplemented!() @@ -164,8 +175,8 @@ pub struct UnimplementedRecorderProvider { } #[cfg(not(feature = "std"))] -impl trie_db::TrieRecorder for UnimplementedRecorderProvider { - fn record<'a>(&mut self, _access: trie_db::TrieAccess<'a, H::Out>) { +impl trie_db::TrieRecorder for UnimplementedRecorderProvider { + fn record<'a>(&mut self, _access: trie_db::TrieAccess<'a, H::Out, DBLocation>) { unimplemented!() } @@ -175,10 +186,10 @@ impl trie_db::TrieRecorder for UnimplementedRecorderProvider< } #[cfg(not(feature = "std"))] -impl TrieRecorderProvider for UnimplementedRecorderProvider { +impl TrieRecorderProvider for UnimplementedRecorderProvider { type Recorder<'a> = UnimplementedRecorderProvider where H: 'a; - fn drain_storage_proof(self) -> Option { + fn drain_storage_proof(&self) -> Option { unimplemented!() } @@ -187,66 +198,106 @@ impl TrieRecorderProvider for 
UnimplementedRecorderProvider { } } +#[cfg(not(feature = "std"))] +impl Default for UnimplementedRecorderProvider { + fn default() -> Self { + UnimplementedRecorderProvider { _phantom: core::marker::PhantomData } + } +} + #[cfg(feature = "std")] -type DefaultCache = LocalTrieCache; +type DefaultCache = LocalTrieCache; #[cfg(not(feature = "std"))] -type DefaultCache = UnimplementedCacheProvider; +type DefaultCache = UnimplementedCacheProvider; + +/// Optional features for the database backend. +pub trait AsDB: NodeDB { + /// Returns the underlying `MemoryDB` if this is a `MemoryDB`. + fn as_mem_db(&self) -> Option<&MemoryDB> { + None + } + + /// Returns the underlying `MemoryDB` if this is a `PrefixedMemoryDB`. + fn as_prefixed_mem_db(&self) -> Option<&PrefixedMemoryDB> { + None + } + + /// Returns the underlying `MemoryDB` if this is a `MemoryDB`. + fn as_mem_db_mut(&mut self) -> Option<&mut MemoryDB> { + None + } + + /// Returns the underlying `MemoryDB` if this is a `MemoryDB`. + fn as_prefixed_mem_db_mut(&mut self) -> Option<&mut PrefixedMemoryDB> { + None + } + + /// Returns the underlying `NodeDB`. + fn as_node_db(&self) -> &dyn NodeDB; +} + +impl AsDB for MemoryDB { + fn as_mem_db(&self) -> Option<&MemoryDB> { + Some(self) + } + + fn as_mem_db_mut(&mut self) -> Option<&mut MemoryDB> { + Some(self) + } + + fn as_node_db(&self) -> &dyn NodeDB { + self + } +} + +impl AsDB for PrefixedMemoryDB { + fn as_prefixed_mem_db(&self) -> Option<&PrefixedMemoryDB> { + Some(self) + } + + fn as_prefixed_mem_db_mut(&mut self) -> Option<&mut PrefixedMemoryDB> { + Some(self) + } + + fn as_node_db(&self) -> &dyn NodeDB { + self + } +} #[cfg(feature = "std")] -type DefaultRecorder = sp_trie::recorder::Recorder; +type DefaultRecorder = sp_trie::recorder::Recorder; #[cfg(not(feature = "std"))] type DefaultRecorder = UnimplementedRecorderProvider; /// Builder for creating a [`TrieBackend`]. -pub struct TrieBackendBuilder< - S: TrieBackendStorage, - H: Hasher, - C = DefaultCache, - R = DefaultRecorder, -> { - storage: S, +pub struct TrieBackendBuilder, R = DefaultRecorder> { + storage: Box>, root: H::Out, recorder: Option, cache: Option, } -impl TrieBackendBuilder +impl TrieBackendBuilder where - S: TrieBackendStorage, H: Hasher, { /// Create a new builder instance. - pub fn new(storage: S, root: H::Out) -> Self { + pub fn new(storage: Box>, root: H::Out) -> Self { Self { storage, root, recorder: None, cache: None } } } -impl TrieBackendBuilder +impl TrieBackendBuilder where - S: TrieBackendStorage, H: Hasher, + R: TrieRecorderProvider, { /// Create a new builder instance. - pub fn new_with_cache(storage: S, root: H::Out, cache: C) -> Self { + pub fn new_with_cache(storage: Box>, root: H::Out, cache: C) -> Self { Self { storage, root, recorder: None, cache: Some(cache) } } - /// Wrap the given [`TrieBackend`]. - /// - /// This can be used for example if all accesses to the trie should - /// be recorded while some other functionality still uses the non-recording - /// backend. - /// - /// The backend storage and the cache will be taken from `other`. - pub fn wrap(other: &TrieBackend) -> TrieBackendBuilder<&S, H, &C, R> { - TrieBackendBuilder { - storage: other.essence.backend_storage(), - root: *other.essence.root(), - recorder: None, - cache: other.essence.trie_node_cache.as_ref(), - } - } /// Use the given optional `recorder` for the to be configured [`TrieBackend`]. 
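// ---------------------------------------------------------------------
// Illustrative sketch, not from the patch: `AsDB` lets callers holding the
// type-erased storage recover a concrete in-memory DB, as
// `into_raw_snapshot` and the `PartialEq` impl in this patch do. The `<H>`
// parameter is assumed; database-backed storage answers `None` to both
// accessors.
fn is_in_memory<H: Hasher>(storage: &dyn AsDB<H>) -> bool {
	storage.as_mem_db().is_some() || storage.as_prefixed_mem_db().is_some()
}
// ---------------------------------------------------------------------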
pub fn with_optional_recorder(self, recorder: Option) -> Self { @@ -259,7 +310,7 @@ where } /// Use the given optional `cache` for the to be configured [`TrieBackend`]. - pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder { + pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder { TrieBackendBuilder { cache, root: self.root, @@ -269,7 +320,7 @@ where } /// Use the given `cache` for the to be configured [`TrieBackend`]. - pub fn with_cache(self, cache: LC) -> TrieBackendBuilder { + pub fn with_cache(self, cache: LC) -> TrieBackendBuilder { TrieBackendBuilder { cache: Some(cache), root: self.root, @@ -279,7 +330,7 @@ where } /// Build the configured [`TrieBackend`]. - pub fn build(self) -> TrieBackend { + pub fn build(self) -> TrieBackend { TrieBackend { essence: TrieBackendEssence::new_with_cache_and_recorder( self.storage, @@ -293,15 +344,15 @@ where } /// A cached iterator. -struct CachedIter +struct CachedIter where H: Hasher, { - last_key: sp_std::vec::Vec, - iter: RawIter, + last_key: alloc::vec::Vec, + iter: RawIter, } -impl Default for CachedIter +impl Default for CachedIter where H: Hasher, { @@ -328,42 +379,40 @@ fn access_cache(cell: &CacheCell, callback: impl FnOnce(&mut T) -> R) - /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend< - S: TrieBackendStorage, H: Hasher, - C = DefaultCache, + C = DefaultCache, + // dblocation for size of structs R = DefaultRecorder, > { - pub(crate) essence: TrieBackendEssence, - next_storage_key_cache: CacheCell>>, + pub(crate) essence: TrieBackendEssence, + next_storage_key_cache: CacheCell>>, } -impl< - S: TrieBackendStorage, - H: Hasher, - C: TrieCacheProvider + Send + Sync, - R: TrieRecorderProvider + Send + Sync, - > TrieBackend +impl TrieBackend where + H: Hasher, H::Out: Codec, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, { #[cfg(test)] - pub(crate) fn from_essence(essence: TrieBackendEssence) -> Self { + pub(crate) fn from_essence(essence: TrieBackendEssence) -> Self { Self { essence, next_storage_key_cache: Default::default() } } /// Get backend essence reference. - pub fn essence(&self) -> &TrieBackendEssence { + pub fn essence(&self) -> &TrieBackendEssence { &self.essence } /// Get backend storage reference. - pub fn backend_storage_mut(&mut self) -> &mut S { - self.essence.backend_storage_mut() + pub fn backend_storage(&self) -> &dyn AsDB { + self.essence.backend_storage() } /// Get backend storage reference. - pub fn backend_storage(&self) -> &S { - self.essence.backend_storage() + pub fn backend_storage_mut(&mut self) -> &mut dyn AsDB { + self.essence.backend_storage_mut() } /// Set trie root. @@ -376,21 +425,85 @@ where self.essence.root() } - /// Consumes self and returns underlying storage. - pub fn into_storage(self) -> S { - self.essence.into_storage() + /// Set recorder. Returns the previous recorder. + pub fn set_recorder(&self, recorder: Option) -> Option { + self.essence.set_recorder(recorder) + } + + /// Set recorder temporarily. Previous recorder is restored when the returned guard is dropped. + pub fn with_temp_recorder(&self, recorder: R) -> WithRecorder { + //let proving_backend = + // TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); + WithRecorder::new(self, recorder) } /// Extract the [`StorageProof`]. /// /// This only returns `Some` when there was a recorder set. 
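// ---------------------------------------------------------------------
// Illustrative sketch, not from the patch: the recorder guard returned by
// `with_temp_recorder` is purely scoped. `backend` is assumed to be a
// `TrieBackend` with no recorder installed beforehand.
{
	let proving = backend.with_temp_recorder(Default::default());
	let _ = proving.storage(b"key");
	// A recorder is installed while the guard is alive, so a proof exists.
	assert!(proving.extract_proof().is_some());
} // Guard dropped here: the previous (absent) recorder is restored.
assert!(backend.extract_proof().is_none());
// ---------------------------------------------------------------------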
- pub fn extract_proof(mut self) -> Option { - self.essence.recorder.take().and_then(|r| r.drain_storage_proof()) + pub fn extract_proof(&self) -> Option { + #[cfg(feature = "std")] + let r = &*self.essence.recorder.read(); + #[cfg(not(feature = "std"))] + let r = self.essence.recorder.borrow(); + r.as_ref().and_then(|r| r.drain_storage_proof()) } } -impl, H: Hasher, C: TrieCacheProvider, R: TrieRecorderProvider> - sp_std::fmt::Debug for TrieBackend +pub struct WithRecorder<'a, H, C, R> +where + H: Hasher, + H::Out: Codec, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, +{ + backend: &'a TrieBackend, + recorder: Option, +} + +impl<'a, H, C, R> WithRecorder<'a, H, C, R> +where + H: Hasher, + H::Out: Codec, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, +{ + fn new(backend: &'a TrieBackend, recorder: R) -> Self { + let prev_recorder = backend.set_recorder(Some(recorder)); + Self { backend, recorder: prev_recorder } + } +} + +impl Drop for WithRecorder<'_, H, C, R> +where + H: Hasher, + H::Out: Codec, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, +{ + fn drop(&mut self) { + self.backend.set_recorder(self.recorder.take()); + } +} + +impl<'a, H, C, R> core::ops::Deref for WithRecorder<'a, H, C, R> +where + H: Hasher, + H::Out: Codec, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, +{ + type Target = TrieBackend; + + fn deref(&self) -> &Self::Target { + self.backend + } +} + +impl core::fmt::Debug for TrieBackend +where + H: Hasher, + C: TrieCacheProvider, + R: TrieRecorderProvider, { fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { write!(f, "TrieBackend") @@ -398,17 +511,15 @@ impl, H: Hasher, C: TrieCacheProvider, R: TrieRecord } impl< - S: TrieBackendStorage, H: Hasher, C: TrieCacheProvider + Send + Sync, - R: TrieRecorderProvider + Send + Sync, - > Backend for TrieBackend + R: TrieRecorderProvider + Send + Sync, + > Backend for TrieBackend where H::Out: Ord + Codec, { type Error = crate::DefaultError; - type TrieBackendStorage = S; - type RawIter = crate::trie_backend_essence::RawIter; + type RawIter = crate::trie_backend_essence::RawIter; fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage_hash(key) @@ -497,9 +608,9 @@ where fn storage_root<'a>( &self, - delta: impl Iterator)>, + delta: impl Iterator, Option>)>, state_version: StateVersion, - ) -> (H::Out, PrefixedMemoryDB) + ) -> BackendTransaction where H::Out: Ord, { @@ -511,7 +622,7 @@ where child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, bool, PrefixedMemoryDB) + ) -> (BackendTransaction, bool) where H::Out: Ord, { @@ -530,10 +641,11 @@ where } #[cfg(feature = "std")] -impl, H: Hasher, C> AsTrieBackend for TrieBackend { - type TrieBackendStorage = S; - - fn as_trie_backend(&self) -> &TrieBackend { +impl AsTrieBackend for TrieBackend { + fn as_trie_backend(&self) -> &TrieBackend { + self + } + fn as_trie_backend_mut(&mut self) -> &mut TrieBackend { self } } @@ -545,15 +657,15 @@ impl, H: Hasher, C> AsTrieBackend for TrieBackend pub fn create_proof_check_backend( root: H::Out, proof: StorageProof, -) -> Result, H>, Box> +) -> Result, Box> where - H: Hasher, + H: Hasher + 'static, H::Out: Codec, { let db = proof.into_memory_db(); - if db.contains(&root, hash_db::EMPTY_PREFIX) { - Ok(TrieBackendBuilder::new(db, root).build()) + if db.contains(&root, trie_db::node_db::EMPTY_PREFIX) { + 
Ok(TrieBackendBuilder::new(Box::new(db), root).build()) } else { Err(Box::new(crate::ExecutionError::InvalidProof)) } @@ -570,16 +682,16 @@ pub mod tests { use sp_trie::{ cache::{CacheSize, SharedTrieCache}, trie_types::{TrieDBBuilder, TrieDBMutBuilderV0, TrieDBMutBuilderV1}, - KeySpacedDBMut, PrefixedMemoryDB, Trie, TrieCache, TrieMut, + MemoryDB, Trie, TrieCache, }; use std::iter; use trie_db::NodeCodec; const CHILD_KEY_1: &[u8] = b"sub1"; - type Recorder = sp_trie::recorder::Recorder; - type Cache = LocalTrieCache; - type SharedCache = SharedTrieCache; + type Recorder = sp_trie::recorder::Recorder; + type Cache = LocalTrieCache; + type SharedCache = SharedTrieCache; macro_rules! parameterized_test { ($name:ident, $internal_name:ident) => { @@ -620,24 +732,26 @@ pub mod tests { }; } - pub(crate) fn test_db(state_version: StateVersion) -> (PrefixedMemoryDB, H256) { + pub(crate) fn test_db(state_version: StateVersion) -> (MemoryDB, H256) { let child_info = ChildInfo::new_default(CHILD_KEY_1); - let mut root = H256::default(); - let mut mdb = PrefixedMemoryDB::::default(); - { - let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); + let mut mdb = MemoryDB::::default(); + let mut root = { match state_version { StateVersion::V0 => { - let mut trie = TrieDBMutBuilderV0::new(&mut mdb, &mut root).build(); + let mut trie = TrieDBMutBuilderV0::new(&mdb).build(); trie.insert(b"value3", &[142; 33]).expect("insert failed"); trie.insert(b"value4", &[124; 33]).expect("insert failed"); + let commit = trie.commit(); + commit.apply_to(&mut mdb) }, StateVersion::V1 => { - let mut trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); + let mut trie = TrieDBMutBuilderV1::new(&mdb).build(); trie.insert(b"value3", &[142; 33]).expect("insert failed"); trie.insert(b"value4", &[124; 33]).expect("insert failed"); + let commit = trie.commit(); + commit.apply_to(&mut mdb) }, - }; + } }; { @@ -645,7 +759,7 @@ pub mod tests { root.encode_to(&mut sub_root); fn build( - mut trie: sp_trie::TrieDBMut, + trie: &mut sp_trie::TrieDBMut, child_info: &ChildInfo, sub_root: &[u8], ) { @@ -660,14 +774,16 @@ pub mod tests { } } - match state_version { + root = match state_version { StateVersion::V0 => { - let trie = TrieDBMutBuilderV0::new(&mut mdb, &mut root).build(); - build(trie, &child_info, &sub_root[..]) + let mut trie = TrieDBMutBuilderV0::new(&mdb).build(); + build(&mut trie, &child_info, &sub_root[..]); + trie.commit().apply_to(&mut mdb) }, StateVersion::V1 => { - let trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); - build(trie, &child_info, &sub_root[..]) + let mut trie = TrieDBMutBuilderV1::new(&mdb).build(); + build(&mut trie, &child_info, &sub_root[..]); + trie.commit().apply_to(&mut mdb) }, }; } @@ -677,21 +793,22 @@ pub mod tests { pub(crate) fn test_db_with_hex_keys( state_version: StateVersion, keys: &[&str], - ) -> (PrefixedMemoryDB, H256) { - let mut root = H256::default(); - let mut mdb = PrefixedMemoryDB::::default(); - match state_version { + ) -> (MemoryDB, H256) { + let mut mdb = MemoryDB::::default(); + let root = match state_version { StateVersion::V0 => { - let mut trie = TrieDBMutBuilderV0::new(&mut mdb, &mut root).build(); + let mut trie = TrieDBMutBuilderV0::new(&mut mdb).build(); for (index, key) in keys.iter().enumerate() { trie.insert(&array_bytes::hex2bytes(key).unwrap(), &[index as u8]).unwrap(); } + trie.commit().apply_to(&mut mdb) }, StateVersion::V1 => { - let mut trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); + let mut trie = 
TrieDBMutBuilderV1::new(&mut mdb).build(); for (index, key) in keys.iter().enumerate() { trie.insert(&array_bytes::hex2bytes(key).unwrap(), &[index as u8]).unwrap(); } + trie.commit().apply_to(&mut mdb) }, }; (mdb, root) @@ -701,10 +818,10 @@ pub mod tests { hashed_value: StateVersion, cache: Option, recorder: Option, - ) -> TrieBackend, BlakeTwo256> { + ) -> TrieBackend { let (mdb, root) = test_db(hashed_value); - TrieBackendBuilder::new(mdb, root) + TrieBackendBuilder::::new(Box::new(mdb), root) .with_optional_cache(cache) .with_optional_recorder(recorder) .build() @@ -715,10 +832,10 @@ pub mod tests { cache: Option, recorder: Option, keys: &[&str], - ) -> TrieBackend, BlakeTwo256> { + ) -> TrieBackend { let (mdb, root) = test_db_with_hex_keys(hashed_value, keys); - TrieBackendBuilder::new(mdb, root) + TrieBackendBuilder::::new(Box::new(mdb), root) .with_optional_cache(cache) .with_optional_recorder(recorder) .build() @@ -800,8 +917,8 @@ pub mod tests { #[test] fn pairs_are_empty_on_empty_storage() { - assert!(TrieBackendBuilder::, BlakeTwo256>::new( - PrefixedMemoryDB::default(), + assert!(TrieBackendBuilder::::new( + Box::new(MemoryDB::default()), Default::default(), ) .build() @@ -986,7 +1103,7 @@ pub mod tests { assert!( test_trie(state_version, cache, recorder) .storage_root(iter::empty(), state_version) - .0 != H256::repeat_byte(0) + .root_hash() != H256::repeat_byte(0) ); } @@ -999,14 +1116,18 @@ pub mod tests { cache: Option, recorder: Option, ) { - let (new_root, mut tx) = test_trie(state_version, cache, recorder) - .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_version); - assert!(!tx.drain().is_empty()); + let tx = test_trie(state_version, cache, recorder).storage_root( + iter::once((&b"new-key"[..], Some(&b"new-value"[..]), None)), + state_version, + ); + let mut mdb = MemoryDB::::default(); + let new_root = tx.apply_to(&mut mdb); + assert!(!mdb.drain().is_empty()); assert!( new_root != test_trie(state_version, None, None) .storage_root(iter::empty(), state_version) - .0 + .root_hash() ); } @@ -1044,9 +1165,8 @@ pub mod tests { recorder: Option, ) { let trie_backend = test_trie(state_version, cache, recorder); - assert!(TrieBackendBuilder::wrap(&trie_backend) - .with_recorder(Recorder::default()) - .build() + assert!(trie_backend + .with_temp_recorder(Recorder::default()) .extract_proof() .unwrap() .is_empty()); @@ -1062,9 +1182,7 @@ pub mod tests { recorder: Option, ) { let trie_backend = test_trie(state_version, cache, recorder); - let backend = TrieBackendBuilder::wrap(&trie_backend) - .with_recorder(Recorder::default()) - .build(); + let backend = trie_backend.with_temp_recorder(Recorder::default()); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); assert!(!backend.extract_proof().unwrap().is_empty()); } @@ -1078,38 +1196,6 @@ pub mod tests { assert!(result.is_err()); } - parameterized_test!(passes_through_backend_calls, passes_through_backend_calls_inner); - fn passes_through_backend_calls_inner( - state_version: StateVersion, - cache: Option, - recorder: Option, - ) { - let trie_backend = test_trie(state_version, cache, recorder); - let proving_backend = TrieBackendBuilder::wrap(&trie_backend) - .with_recorder(Recorder::default()) - .build(); - assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); - assert_eq!( - trie_backend - .pairs(Default::default()) - .unwrap() - .map(|result| result.unwrap()) - .collect::>(), - proving_backend - .pairs(Default::default()) - .unwrap() - 
.map(|result| result.unwrap()) - .collect::>() - ); - - let (trie_root, mut trie_mdb) = - trie_backend.storage_root(std::iter::empty(), state_version); - let (proving_root, mut proving_mdb) = - proving_backend.storage_root(std::iter::empty(), state_version); - assert_eq!(trie_root, proving_root); - assert_eq!(trie_mdb.drain(), proving_mdb.drain()); - } - #[test] fn proof_recorded_and_checked_top() { proof_recorded_and_checked_inner(StateVersion::V0); @@ -1122,22 +1208,23 @@ pub mod tests { .clone() .map(|i| (vec![i], Some(vec![i; size_content]))) .collect::>(); + let in_memory = InMemoryBackend::::default(); - let in_memory = in_memory.update(vec![(None, contents)], state_version); - let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).0; + let mut in_memory = in_memory.update(vec![(None, contents)], state_version).unwrap(); + let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).root_hash(); value_range.clone().for_each(|i| { assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content]) }); - let trie = in_memory.as_trie_backend(); - let trie_root = trie.storage_root(std::iter::empty(), state_version).0; + let trie = in_memory.as_trie_backend_mut(); + let trie_root = trie.storage_root(std::iter::empty(), state_version).root_hash(); assert_eq!(in_memory_root, trie_root); value_range .clone() .for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); + // Run multiple times to have a different cache conditions. for cache in [Some(SharedTrieCache::new(CacheSize::unlimited())), None] { - // Run multiple times to have a different cache conditions. for i in 0..5 { if let Some(cache) = &cache { if i == 2 { @@ -1147,10 +1234,8 @@ pub mod tests { } } - let proving = TrieBackendBuilder::wrap(&trie) - .with_recorder(Recorder::default()) - .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) - .build(); + trie.essence.trie_node_cache = cache.as_ref().map(|c| c.local_cache()); + let proving = trie.with_temp_recorder(Recorder::default()); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); let proof = proving.extract_proof().unwrap(); @@ -1182,20 +1267,20 @@ pub mod tests { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); let in_memory = InMemoryBackend::::default(); - let in_memory = in_memory.update(vec![(None, contents)], state_version); - let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).0; + let mut in_memory = + in_memory.update(vec![(None, contents)], state_version).unwrap(); + let in_memory_root = + in_memory.storage_root(std::iter::empty(), state_version).root_hash(); (0..64) .for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); - let trie = in_memory.as_trie_backend(); - let trie_root = trie.storage_root(std::iter::empty(), state_version).0; + let trie = in_memory.as_trie_backend_mut(); + let trie_root = trie.storage_root(std::iter::empty(), state_version).root_hash(); assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - let proving = TrieBackendBuilder::wrap(&trie) - .with_recorder(Recorder::default()) - .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) - .build(); + trie.essence.trie_node_cache = cache.as_ref().map(|c| c.local_cache()); + let proving = trie.with_temp_recorder(Recorder::default()); (0..63).for_each(|i| { assert_eq!(proving.next_storage_key(&[i]).unwrap(), Some(vec![i + 1])) @@ -1229,7 +1314,7 
@@ pub mod tests { (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = new_in_mem::(); - let in_memory = in_memory.update(contents, state_version); + let mut in_memory = in_memory.update(contents, state_version).unwrap(); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory .full_storage_root( @@ -1237,7 +1322,7 @@ pub mod tests { child_storage_keys.iter().map(|k| (k, std::iter::empty())), state_version, ) - .0; + .root_hash(); (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); (28..65).for_each(|i| { assert_eq!(in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i]) @@ -1259,15 +1344,14 @@ pub mod tests { } } - let trie = in_memory.as_trie_backend(); - let trie_root = trie.storage_root(std::iter::empty(), state_version).0; + let trie = in_memory.as_trie_backend_mut(); + let trie_root = trie.storage_root(std::iter::empty(), state_version).root_hash(); assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - let proving = TrieBackendBuilder::wrap(&trie) - .with_recorder(Recorder::default()) - .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) - .build(); + trie.essence.trie_node_cache = cache.as_ref().map(|c| c.local_cache()); + let proving = trie.with_temp_recorder(Recorder::default()); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); let proof = proving.extract_proof().unwrap(); @@ -1280,11 +1364,11 @@ pub mod tests { // note that it is include in root because proof close assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); assert_eq!(proof_check.storage(&[64]).unwrap(), None); + std::mem::drop(proving); + + trie.essence.trie_node_cache = cache.as_ref().map(|c| c.local_cache()); + let proving = trie.with_temp_recorder(Recorder::default()); - let proving = TrieBackendBuilder::wrap(&trie) - .with_recorder(Recorder::default()) - .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) - .build(); assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); assert_eq!(proving.child_storage(child_info_1, &[25]), Ok(None)); assert_eq!(proving.child_storage(child_info_2, &[14]), Ok(Some(vec![14]))); @@ -1334,21 +1418,23 @@ pub mod tests { ), ]; let in_memory = new_in_mem::(); - let in_memory = in_memory.update(contents, state_version); + let mut in_memory = in_memory.update(contents, state_version).unwrap(); let child_storage_keys = vec![child_info_1.to_owned()]; - let in_memory_root = in_memory - .full_storage_root( - std::iter::empty(), - child_storage_keys.iter().map(|k| (k, std::iter::empty())), - state_version, - ) - .0; - - let child_1_root = - in_memory.child_storage_root(child_info_1, std::iter::empty(), state_version).0; - let trie = in_memory.as_trie_backend(); + let commit = in_memory.full_storage_root( + std::iter::empty(), + child_storage_keys.iter().map(|k| (k, std::iter::empty())), + state_version, + ); + let in_memory_root = commit.root_hash(); + in_memory.apply_transaction(commit); + + let child_1_root = in_memory + .child_storage_root(child_info_1, std::iter::empty(), state_version) + .0 + .root_hash(); + let trie = in_memory.as_trie_backend_mut(); let nodes = { - let backend = TrieBackendBuilder::wrap(trie).with_recorder(Default::default()).build(); + let backend = trie.with_temp_recorder(Default::default()); let value = backend.child_storage(child_info_1, &[65]).unwrap().unwrap(); let 
value_hash = BlakeTwo256::hash(&value); assert_eq!(value, vec![65; 128]); @@ -1360,9 +1446,9 @@ pub mod tests { let hash = BlakeTwo256::hash(&node); // Only insert the node/value that contains the important data. if hash != value_hash { - let node = sp_trie::NodeCodec::::decode(&node) + let node = sp_trie::NodeCodec::::decode::(&node, &[]) .unwrap() - .to_owned_node::>() + .to_owned_node::>() .unwrap(); if let Some(data) = node.data() { @@ -1378,14 +1464,16 @@ pub mod tests { nodes }; - let cache = SharedTrieCache::::new(CacheSize::unlimited()); + let cache = SharedTrieCache::::new(CacheSize::unlimited()); { let local_cache = cache.local_cache(); let mut trie_cache = local_cache.as_trie_db_cache(child_1_root); // Put the value/node into the cache. for (hash, node) in nodes { - trie_cache.get_or_insert_node(hash, &mut || Ok(node.clone())).unwrap(); + trie_cache + .get_or_insert_node(hash, Default::default(), &mut || Ok(node.clone())) + .unwrap(); if let Some(data) = node.data() { trie_cache.cache_value_for_key(&[65], (data.clone(), hash).into()); @@ -1395,10 +1483,8 @@ pub mod tests { { // Record the access - let proving = TrieBackendBuilder::wrap(&trie) - .with_recorder(Recorder::default()) - .with_cache(cache.local_cache()) - .build(); + trie.essence.trie_node_cache = Some(cache.local_cache()); + let proving = trie.with_temp_recorder(Recorder::default()); assert_eq!(proving.child_storage(child_info_1, &[65]), Ok(Some(vec![65; 128]))); let proof = proving.extract_proof().unwrap(); @@ -1431,15 +1517,9 @@ pub mod tests { &b"doesnotexist2"[..], ]; - fn check_estimation( - backend: TrieBackend< - impl TrieBackendStorage, - BlakeTwo256, - &'_ LocalTrieCache, - >, - has_cache: bool, - ) { - let estimation = backend.essence.recorder.as_ref().unwrap().estimate_encoded_size(); + fn check_estimation(backend: &TrieBackend, has_cache: bool) { + let estimation = + backend.essence.recorder.read().as_ref().unwrap().estimate_encoded_size(); let storage_proof = backend.extract_proof().unwrap(); let storage_proof_size = storage_proof.into_nodes().into_iter().map(|n| n.encoded_size()).sum::(); @@ -1453,9 +1533,7 @@ pub mod tests { } for n in 0..keys.len() { - let backend = TrieBackendBuilder::wrap(&trie_backend) - .with_recorder(Recorder::default()) - .build(); + let backend = trie_backend.with_temp_recorder(Default::default()); // Read n keys (0..n).for_each(|i| { @@ -1463,12 +1541,12 @@ pub mod tests { }); // Check the estimation - check_estimation(backend, has_cache); + check_estimation(&backend, has_cache); } } #[test] - fn new_data_is_added_to_the_cache() { + fn new_data_is_not_added_to_the_cache() { let shared_cache = SharedTrieCache::new(CacheSize::unlimited()); let new_data = vec![ (&b"new_data0"[..], Some(&b"0"[..])), @@ -1480,17 +1558,18 @@ pub mod tests { let new_root = { let trie = test_trie(StateVersion::V1, Some(shared_cache.local_cache()), None); - trie.storage_root(new_data.clone().into_iter(), StateVersion::V1).0 + trie.storage_root( + new_data.clone().into_iter().map(|(k, v)| (k, v, None)), + StateVersion::V1, + ) + .root_hash() }; let local_cache = shared_cache.local_cache(); let mut cache = local_cache.as_trie_db_cache(new_root); - // All the data should be cached now - for (key, value) in new_data { - assert_eq!( - value.unwrap(), - cache.lookup_value_for_key(key).unwrap().data().flatten().unwrap().as_ref() - ); + // All the data should not be cached now + for (key, _value) in new_data { + assert!(cache.lookup_value_for_key(key).is_none()); } } @@ -1522,7 +1601,7 @@ pub mod tests { 
(Some(child_info_2.clone()), vec![(key.clone(), Some(child_trie_2_val.clone()))]), ]; let in_memory = new_in_mem::(); - let in_memory = in_memory.update(contents, state_version); + let mut in_memory = in_memory.update(contents, state_version).unwrap(); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory .full_storage_root( @@ -1530,7 +1609,7 @@ pub mod tests { child_storage_keys.iter().map(|k| (k, std::iter::empty())), state_version, ) - .0; + .root_hash(); assert_eq!(in_memory.storage(&key).unwrap().unwrap(), top_trie_val); assert_eq!(in_memory.child_storage(child_info_1, &key).unwrap().unwrap(), child_trie_1_val); assert_eq!(in_memory.child_storage(child_info_2, &key).unwrap().unwrap(), child_trie_2_val); @@ -1548,14 +1627,13 @@ pub mod tests { } } - let trie = in_memory.as_trie_backend(); - let trie_root = trie.storage_root(std::iter::empty(), state_version).0; + let trie = in_memory.as_trie_backend_mut(); + let trie_root = trie.storage_root(std::iter::empty(), state_version).root_hash(); assert_eq!(in_memory_root, trie_root); - let proving = TrieBackendBuilder::wrap(&trie) - .with_recorder(Recorder::default()) - .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) - .build(); + trie.essence.trie_node_cache = cache.as_ref().map(|c| c.local_cache()); + let proving = trie.with_temp_recorder(Recorder::default()); + assert_eq!(proving.storage(&key).unwrap().unwrap(), top_trie_val); assert_eq!( proving.child_storage(child_info_1, &key).unwrap().unwrap(), diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index 3f789111deef..f99dc3e19304 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -19,32 +19,35 @@ //! from storage. use crate::{ - backend::{IterArgs, StorageIterator}, - trie_backend::TrieCacheProvider, + backend::{BackendTransaction, IterArgs, StorageIterator}, + trie_backend::{AsDB, TrieCacheProvider}, warn, StorageKey, StorageValue, }; +use alloc::{boxed::Box, vec::Vec}; use codec::Codec; -use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; +#[cfg(not(feature = "std"))] +use core::cell::RefCell; +use core::marker::PhantomData; #[cfg(feature = "std")] use parking_lot::RwLock; use sp_core::storage::{ChildInfo, ChildType, StateVersion}; -#[cfg(feature = "std")] -use sp_std::sync::Arc; -use sp_std::{boxed::Box, marker::PhantomData, vec::Vec}; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, - read_child_trie_first_descedant_value, read_child_trie_hash, read_child_trie_value, - read_trie_first_descedant_value, read_trie_value, + read_child_trie_first_descendant_value, read_child_trie_hash, read_child_trie_value, + read_trie_first_descendant_value, read_trie_value, read_trie_value_with_location, trie_types::{TrieDBBuilder, TrieError}, - DBValue, KeySpacedDB, MerkleValue, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, + ChildChangeset, DBValue, KeySpacedDB, MerkleValue, NodeCodec, Trie, TrieCache, TrieDBRawIterator, TrieRecorder, TrieRecorderProvider, }; #[cfg(feature = "std")] use std::collections::HashMap; +use trie_db::node_db::{Hasher, NodeDB, Prefix}; // In this module, we only use layout for read operation and empty root, // where V1 and V0 are equivalent. use sp_trie::LayoutV1 as Layout; +type Root = sp_trie::Root>; + #[cfg(not(feature = "std"))] macro_rules! 
format { ( $message:expr, $( $arg:expr )* ) => { @@ -55,18 +58,13 @@ macro_rules! format { }; } -type Result = sp_std::result::Result; - -/// Patricia trie-based storage trait. -pub trait Storage: Send + Sync { - /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; -} +type Result = core::result::Result; +type DBLocation = sp_trie::DBLocation; /// Local cache for child root. #[cfg(feature = "std")] pub(crate) struct Cache { - pub child_root: HashMap, Option>, + pub child_root: HashMap, Option<(H, DBLocation)>>, } #[cfg(feature = "std")] @@ -83,34 +81,33 @@ enum IterState { } /// A raw iterator over the storage. -pub struct RawIter +pub struct RawIter where H: Hasher, { stop_on_incomplete_database: bool, skip_if_first: Option, - root: H::Out, + root: Root, child_info: Option, - trie_iter: TrieDBRawIterator>, + trie_iter: TrieDBRawIterator>, state: IterState, - _phantom: PhantomData<(S, C, R)>, + _phantom: PhantomData<(C, R)>, } -impl RawIter +impl RawIter where H: Hasher, - S: TrieBackendStorage, H::Out: Codec + Ord, C: TrieCacheProvider + Send + Sync, - R: TrieRecorderProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, { #[inline] fn prepare( &mut self, - backend: &TrieBackendEssence, + backend: &TrieBackendEssence, callback: impl FnOnce( - &sp_trie::TrieDB>, - &mut TrieDBRawIterator>, + &sp_trie::TrieDB>, + &mut TrieDBRawIterator>, ) -> Option::Out>>>>, ) -> Option> { if !matches!(self.state, IterState::Pending) { @@ -140,7 +137,7 @@ where } } -impl Default for RawIter +impl Default for RawIter where H: Hasher, { @@ -157,15 +154,14 @@ where } } -impl StorageIterator for RawIter +impl StorageIterator for RawIter where H: Hasher, - S: TrieBackendStorage, H::Out: Codec + Ord, C: TrieCacheProvider + Send + Sync, - R: TrieRecorderProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, { - type Backend = crate::TrieBackend; + type Backend = crate::TrieBackend; type Error = crate::DefaultError; #[inline] @@ -206,38 +202,54 @@ where } /// Patricia trie-based pairs storage essence. -pub struct TrieBackendEssence, H: Hasher, C, R> { - storage: S, +pub struct TrieBackendEssence { + pub(crate) storage: Box>, root: H::Out, empty: H::Out, #[cfg(feature = "std")] - pub(crate) cache: Arc>>, + pub(crate) cache: RwLock>, pub(crate) trie_node_cache: Option, - pub(crate) recorder: Option, + #[cfg(feature = "std")] + pub(crate) recorder: RwLock>, + #[cfg(not(feature = "std"))] + pub(crate) recorder: RefCell>, } -impl, H: Hasher, C, R> TrieBackendEssence { +#[cfg(not(feature = "std"))] +unsafe impl Send for TrieBackendEssence {} + +#[cfg(not(feature = "std"))] +unsafe impl Sync for TrieBackendEssence {} + +impl TrieBackendEssence +where + H: Hasher, + R: TrieRecorderProvider, +{ /// Create new trie-based backend. - pub fn new(storage: S, root: H::Out) -> Self { + pub fn new(storage: Box>, root: H::Out) -> Self { Self::new_with_cache(storage, root, None) } /// Create new trie-based backend. - pub fn new_with_cache(storage: S, root: H::Out, cache: Option) -> Self { + pub fn new_with_cache(storage: Box>, root: H::Out, cache: Option) -> Self { TrieBackendEssence { storage, root, empty: H::hash(&[0u8]), #[cfg(feature = "std")] - cache: Arc::new(RwLock::new(Cache::new())), + cache: RwLock::new(Cache::new()), trie_node_cache: cache, - recorder: None, + #[cfg(feature = "std")] + recorder: RwLock::new(None), + #[cfg(not(feature = "std"))] + recorder: RefCell::new(None), } } /// Create new trie-based backend. 
pub fn new_with_cache_and_recorder( - storage: S, + storage: Box>, root: H::Out, cache: Option, recorder: Option, @@ -247,22 +259,23 @@ impl, H: Hasher, C, R> TrieBackendEssence { root, empty: H::hash(&[0u8]), #[cfg(feature = "std")] - cache: Arc::new(RwLock::new(Cache::new())), + cache: RwLock::new(Cache::new()), trie_node_cache: cache, - recorder, + #[cfg(feature = "std")] + recorder: RwLock::new(recorder), + #[cfg(not(feature = "std"))] + recorder: RefCell::new(recorder), } } /// Get backend storage reference. - pub fn backend_storage(&self) -> &S { - &self.storage + pub fn backend_storage(&self) -> &dyn AsDB { + &*self.storage } - /// Get backend storage mutable reference. - pub fn backend_storage_mut(&mut self) -> &mut S { - &mut self.storage + pub fn backend_storage_mut(&mut self) -> &mut dyn AsDB { + &mut *self.storage } - /// Get trie root. pub fn root(&self) -> &H::Out { &self.root @@ -275,22 +288,33 @@ impl, H: Hasher, C, R> TrieBackendEssence { self.root = root; } + /// Set recorder. Returns old recorder if any. + pub fn set_recorder(&self, recorder: Option) -> Option { + if recorder.is_some() { + // TODO try without reset. + self.reset_cache(); + } + #[cfg(feature = "std")] + let r = core::mem::replace(&mut *self.recorder.write(), recorder); + #[cfg(not(feature = "std"))] + let r = core::mem::replace(&mut *self.recorder.borrow_mut(), recorder); + r + } + #[cfg(feature = "std")] - fn reset_cache(&mut self) { - self.cache = Arc::new(RwLock::new(Cache::new())); + fn reset_cache(&self) { + *self.cache.write() = Cache::new(); } #[cfg(not(feature = "std"))] - fn reset_cache(&mut self) {} - - /// Consumes self and returns underlying storage. - pub fn into_storage(self) -> S { - self.storage - } + fn reset_cache(&self) {} } -impl, H: Hasher, C: TrieCacheProvider, R: TrieRecorderProvider> - TrieBackendEssence +impl TrieBackendEssence +where + H: Hasher, + C: TrieCacheProvider, + R: TrieRecorderProvider, { /// Call the given closure passing it the recorder and the cache. 
/// @@ -300,17 +324,21 @@ impl, H: Hasher, C: TrieCacheProvider, R: TrieRecord &self, storage_root: Option, callback: impl FnOnce( - Option<&mut dyn TrieRecorder>, - Option<&mut dyn TrieCache>>, + Option<&mut dyn TrieRecorder>, + Option<&mut dyn TrieCache, DBLocation>>, ) -> RE, ) -> RE { let storage_root = storage_root.unwrap_or_else(|| self.root); let mut cache = self.trie_node_cache.as_ref().map(|c| c.as_trie_db_cache(storage_root)); let cache = cache.as_mut().map(|c| c as _); - let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); + #[cfg(feature = "std")] + let recorder = self.recorder.read(); + #[cfg(not(feature = "std"))] + let recorder = self.recorder.borrow(); + let mut recorder = recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); let recorder = match recorder.as_mut() { - Some(recorder) => Some(recorder as &mut dyn TrieRecorder), + Some(recorder) => Some(recorder as &mut dyn TrieRecorder), None => None, }; callback(recorder, cache) @@ -327,14 +355,18 @@ impl, H: Hasher, C: TrieCacheProvider, R: TrieRecord &self, storage_root: Option, callback: impl FnOnce( - Option<&mut dyn TrieRecorder>, - Option<&mut dyn TrieCache>>, + Option<&mut dyn TrieRecorder>, + Option<&mut dyn TrieCache, DBLocation>>, ) -> (Option, RE), ) -> RE { let storage_root = storage_root.unwrap_or_else(|| self.root); - let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); + #[cfg(feature = "std")] + let recorder = self.recorder.read(); + #[cfg(not(feature = "std"))] + let recorder = self.recorder.borrow(); + let mut recorder = recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); let recorder = match recorder.as_mut() { - Some(recorder) => Some(recorder as &mut dyn TrieRecorder), + Some(recorder) => Some(recorder as &mut dyn TrieRecorder), None => None, }; @@ -356,32 +388,30 @@ impl, H: Hasher, C: TrieCacheProvider, R: TrieRecord } } -impl< - S: TrieBackendStorage, - H: Hasher, - C: TrieCacheProvider + Send + Sync, - R: TrieRecorderProvider + Send + Sync, - > TrieBackendEssence +impl TrieBackendEssence where + H: Hasher, H::Out: Codec + Ord, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, { /// Calls the given closure with a [`TrieDb`] constructed for the given /// storage root and (optionally) child trie. #[inline] fn with_trie_db( &self, - root: H::Out, + root: Root, child_info: Option<&ChildInfo>, - callback: impl FnOnce(&sp_trie::TrieDB>) -> RE, + callback: impl FnOnce(&sp_trie::TrieDB>) -> RE, ) -> RE { - let backend = self as &dyn HashDBRef>; + let backend = self as &dyn NodeDB, DBLocation>; let db = child_info .as_ref() .map(|child_info| KeySpacedDB::new(backend, child_info.keyspace())); - let db = db.as_ref().map(|db| db as &dyn HashDBRef>).unwrap_or(backend); + let db = db.as_ref().map(|db| db as &dyn NodeDB<_, _, _>).unwrap_or(backend); - self.with_recorder_and_cache(Some(root), |recorder, cache| { - let trie = TrieDBBuilder::::new(db, &root) + self.with_recorder_and_cache(Some(root.0), |recorder, cache| { + let trie = TrieDBBuilder::::new_with_db_location(db, &root.0, root.1) .with_optional_recorder(recorder) .with_optional_cache(cache) .build(); @@ -398,11 +428,11 @@ where /// the next key through an iterator. 
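A note on the recorder plumbing in the two hunks above: the recorder now sits in interior-mutable state (`RwLock` on std, `RefCell` otherwise), so it can be swapped through `&self` via `set_recorder`, which also resets the child-root cache and returns the previously installed recorder. A minimal sketch of the install/restore pattern that the tests' `with_temp_recorder` presumably wraps; the helper name and exact trait bounds are illustrative assumptions, not part of this diff:

```rust
// Illustrative helper (not part of this diff): install a temporary recorder,
// run some reads, then restore the previous recorder and recover ours.
fn with_recorded_reads<H, C, R>(
    essence: &TrieBackendEssence<H, C, R>,
    recorder: R,
    reads: impl FnOnce(&TrieBackendEssence<H, C, R>),
) -> Option<R>
where
    H: Hasher,
    R: TrieRecorderProvider<H, DBLocation>,
{
    // `set_recorder` resets the child-root cache and returns the old recorder.
    let previous = essence.set_recorder(Some(recorder));
    reads(essence);
    // Swapping the old recorder back in returns the temporary one, which now
    // holds the recorded accesses (e.g. for `drain_storage_proof`).
    essence.set_recorder(previous)
}
```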
#[cfg(debug_assertions)] pub fn next_storage_key_slow(&self, key: &[u8]) -> Result> { - self.next_storage_key_from_root(&self.root, None, key) + self.next_storage_key_from_root(&(self.root, Default::default()), None, key) } /// Access the root of the child storage in its parent trie - fn child_root(&self, child_info: &ChildInfo) -> Result> { + fn child_root(&self, child_info: &ChildInfo) -> Result>> { #[cfg(feature = "std")] { if let Some(result) = self.cache.read().child_root.get(child_info.storage_key()) { @@ -410,13 +440,25 @@ where } } - let result = self.storage(child_info.prefixed_storage_key().as_slice())?.map(|r| { + let map_e = |e| format!("Trie lookup with location error: {}", e); + let result = self.with_recorder_and_cache(None, |recorder, cache| { + read_trie_value_with_location::, _>( + self, + &self.root, + child_info.prefixed_storage_key().as_slice(), + recorder, + cache, + ) + .map_err(map_e) + }); + + let result = result?.map(|r| { let mut hash = H::Out::default(); // root is fetched from DB, not writable by runtime, so it's always valid. - hash.as_mut().copy_from_slice(&r[..]); + hash.as_mut().copy_from_slice(&r.0[..]); - hash + (hash, r.1) }); #[cfg(feature = "std")] @@ -445,7 +487,7 @@ where /// Return next key from main trie or child trie by providing corresponding root. fn next_storage_key_from_root( &self, - root: &H::Out, + root: &Root, child_info: Option<&ChildInfo>, key: &[u8], ) -> Result> { @@ -496,7 +538,8 @@ where let map_e = |e| format!("Trie lookup error: {}", e); self.with_recorder_and_cache(None, |recorder, cache| { - read_trie_value::, _>(self, &self.root, key, recorder, cache).map_err(map_e) + read_trie_value::, _>(self, &self.root, key, recorder, cache) + .map_err(map_e) }) } @@ -509,8 +552,8 @@ where let map_e = |e| format!("Trie lookup error: {}", e); - self.with_recorder_and_cache(Some(child_root), |recorder, cache| { - read_child_trie_hash::, _>( + self.with_recorder_and_cache(Some(child_root.0), |recorder, cache| { + read_child_trie_hash::>( child_info.keyspace(), self, &child_root, @@ -535,8 +578,8 @@ where let map_e = |e| format!("Trie lookup error: {}", e); - self.with_recorder_and_cache(Some(child_root), |recorder, cache| { - read_child_trie_value::, _>( + self.with_recorder_and_cache(Some(child_root.0), |recorder, cache| { + read_child_trie_value::>( child_info.keyspace(), self, &child_root, @@ -553,8 +596,10 @@ where let map_e = |e| format!("Trie lookup error: {}", e); self.with_recorder_and_cache(None, |recorder, cache| { - read_trie_first_descedant_value::, _>(self, &self.root, key, recorder, cache) - .map_err(map_e) + read_trie_first_descendant_value::, _>( + self, &self.root, key, recorder, cache, + ) + .map_err(map_e) }) } @@ -568,8 +613,8 @@ where let map_e = |e| format!("Trie lookup error: {}", e); - self.with_recorder_and_cache(Some(child_root), |recorder, cache| { - read_child_trie_first_descedant_value::, _>( + self.with_recorder_and_cache(Some(child_root.0), |recorder, cache| { + read_child_trie_first_descendant_value::, _>( child_info.keyspace(), self, &child_root, @@ -582,7 +627,7 @@ where } /// Create a raw iterator over the storage. - pub fn raw_iter(&self, args: IterArgs) -> Result> { + pub fn raw_iter(&self, args: IterArgs) -> Result> { let root = if let Some(child_info) = args.child_info.as_ref() { let root = match self.child_root(&child_info)? 
{ Some(root) => root, @@ -590,7 +635,7 @@ where }; root } else { - self.root + (self.root, Default::default()) }; if self.root == Default::default() { @@ -627,32 +672,40 @@ where /// Return the storage root after applying the given `delta`. pub fn storage_root<'a>( &self, - delta: impl Iterator)>, + delta: impl Iterator, Option>)>, state_version: StateVersion, - ) -> (H::Out, PrefixedMemoryDB) { - let mut write_overlay = PrefixedMemoryDB::default(); - - let root = self.with_recorder_and_cache_for_storage_root(None, |recorder, cache| { - let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); - let res = match state_version { - StateVersion::V0 => delta_trie_root::, _, _, _, _, _>( - &mut eph, self.root, delta, recorder, cache, - ), - StateVersion::V1 => delta_trie_root::, _, _, _, _, _>( - &mut eph, self.root, delta, recorder, cache, - ), + ) -> BackendTransaction { + self.with_recorder_and_cache_for_storage_root(None, |recorder, cache| { + let backend = self as &dyn NodeDB, DBLocation>; + let commit = match state_version { + StateVersion::V0 => + delta_trie_root::, _, _, _, _>( + backend, + (self.root, Default::default()), + delta, + recorder, + cache, + None, + ), + StateVersion::V1 => + delta_trie_root::, _, _, _, _>( + backend, + (self.root, Default::default()), + delta, + recorder, + cache, + None, + ), }; - match res { - Ok(ret) => (Some(ret), ret), + match commit { + Ok(commit) => (Some(commit.root_hash()), commit), Err(e) => { warn!(target: "trie", "Failed to write to trie: {}", e); - (None, self.root) + (None, BackendTransaction::unchanged(self.root)) }, } - }); - - (root, write_overlay) + }) } /// Returns the child storage root for the child trie `child_info` after applying the given @@ -662,214 +715,81 @@ where child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, bool, PrefixedMemoryDB) { + ) -> (BackendTransaction, bool) { let default_root = match child_info.child_type() { - ChildType::ParentKeyId => empty_child_trie_root::>(), + ChildType::ParentKeyId => empty_child_trie_root::>(), }; - let mut write_overlay = PrefixedMemoryDB::default(); let child_root = match self.child_root(child_info) { - Ok(Some(hash)) => hash, - Ok(None) => default_root, + Ok(Some(root)) => root, + Ok(None) => (default_root, Default::default()), Err(e) => { warn!(target: "trie", "Failed to read child storage root: {}", e); - default_root + (default_root, Default::default()) }, }; - let new_child_root = - self.with_recorder_and_cache_for_storage_root(Some(child_root), |recorder, cache| { - let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); + let commit = + self.with_recorder_and_cache_for_storage_root(Some(child_root.0), |recorder, cache| { + let backend = self as &dyn NodeDB, DBLocation>; match match state_version { StateVersion::V0 => - child_delta_trie_root::, _, _, _, _, _, _>( + child_delta_trie_root::, _, _, _, _, _>( child_info.keyspace(), - &mut eph, - child_root, + backend, + child_root.0, + child_root.1, delta, recorder, cache, ), StateVersion::V1 => - child_delta_trie_root::, _, _, _, _, _, _>( + child_delta_trie_root::, _, _, _, _, _>( child_info.keyspace(), - &mut eph, - child_root, + backend, + child_root.0, + child_root.1, delta, recorder, cache, ), } { - Ok(ret) => (Some(ret), ret), + Ok(commit) => (Some(commit.root_hash()), commit), Err(e) => { warn!(target: "trie", "Failed to write to trie: {}", e); - (None, child_root) + (None, BackendTransaction::unchanged(self.root)) }, } }); - let is_default = 
new_child_root == default_root; - - (new_child_root, is_default, write_overlay) - } -} - -pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - storage: &'a S, - overlay: &'a mut PrefixedMemoryDB, -} - -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> AsHashDB - for Ephemeral<'a, S, H> -{ - fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { - self - } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { - self - } -} - -impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { - pub fn new(storage: &'a S, overlay: &'a mut PrefixedMemoryDB) -> Self { - Ephemeral { storage, overlay } - } -} - -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB - for Ephemeral<'a, S, H> -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - HashDB::get(self.overlay, key, prefix).or_else(|| { - self.storage.get(key, prefix).unwrap_or_else(|e| { - warn!(target: "trie", "Failed to read from DB: {}", e); - None - }) - }) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - HashDB::get(self, key, prefix).is_some() - } - - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { - HashDB::insert(self.overlay, prefix, value) - } - - fn emplace(&mut self, key: H::Out, prefix: Prefix, value: DBValue) { - HashDB::emplace(self.overlay, key, prefix, value) - } - - fn remove(&mut self, key: &H::Out, prefix: Prefix) { - HashDB::remove(self.overlay, key, prefix) - } -} + let is_default = commit.root_hash() == default_root; -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDBRef for Ephemeral<'a, S, H> { - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - HashDB::get(self, key, prefix) + (commit, is_default) } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - HashDB::contains(self, key, prefix) - } -} - -/// Key-value pairs storage that is used by trie backend essence. -pub trait TrieBackendStorage: Send + Sync { - /// Get the value stored at key. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; } -impl, H: Hasher> TrieBackendStorage for &T { - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { - (*self).get(key, prefix) - } -} - -// This implementation is used by normal storage trie clients. 
-#[cfg(feature = "std")] -impl TrieBackendStorage for Arc> { - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { - Storage::::get(std::ops::Deref::deref(self), key, prefix) - } -} - -impl TrieBackendStorage for sp_trie::GenericMemoryDB +impl NodeDB for TrieBackendEssence where H: Hasher, - KF: sp_trie::KeyFunction + Send + Sync, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { - Ok(hash_db::HashDB::get(self, key, prefix)) - } -} - -impl< - S: TrieBackendStorage, - H: Hasher, - C: TrieCacheProvider + Send + Sync, - R: TrieRecorderProvider + Send + Sync, - > AsHashDB for TrieBackendEssence -{ - fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { - self - } - - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { - self - } -} - -impl< - S: TrieBackendStorage, - H: Hasher, - C: TrieCacheProvider + Send + Sync, - R: TrieRecorderProvider + Send + Sync, - > HashDB for TrieBackendEssence + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, { - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + fn get( + &self, + key: &H::Out, + prefix: Prefix, + location: DBLocation, + ) -> Option<(DBValue, Vec)> { if *key == self.empty { - return Some([0u8].to_vec()) - } - match self.storage.get(key, prefix) { - Ok(x) => x, - Err(e) => { - warn!(target: "trie", "Failed to read from DB: {}", e); - None - }, + return Some(([0u8].to_vec(), Default::default())) } + self.storage.get(key, prefix, location) } - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - HashDB::get(self, key, prefix).is_some() - } - - fn insert(&mut self, _prefix: Prefix, _value: &[u8]) -> H::Out { - unimplemented!(); - } - - fn emplace(&mut self, _key: H::Out, _prefix: Prefix, _value: DBValue) { - unimplemented!(); - } - - fn remove(&mut self, _key: &H::Out, _prefix: Prefix) { - unimplemented!(); - } -} - -impl< - S: TrieBackendStorage, - H: Hasher, - C: TrieCacheProvider + Send + Sync, - R: TrieRecorderProvider + Send + Sync, - > HashDBRef for TrieBackendEssence -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - HashDB::get(self, key, prefix) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - HashDB::contains(self, key, prefix) + fn contains(&self, key: &H::Out, prefix: Prefix, location: DBLocation) -> bool { + if *key == self.empty { + return true + } + self.storage.contains(key, prefix, location) } } @@ -877,48 +797,45 @@ impl< mod test { use super::*; use crate::{Backend, TrieBackend}; - use sp_core::{Blake2Hasher, H256}; + use sp_core::Blake2Hasher; use sp_trie::{ - cache::LocalTrieCache, trie_types::TrieDBMutBuilderV1 as TrieDBMutBuilder, KeySpacedDBMut, - PrefixedMemoryDB, TrieMut, + cache::LocalTrieCache, trie_types::TrieDBMutBuilderV1 as TrieDBMutBuilder, KeySpacedDB, + PrefixedMemoryDB, }; #[test] fn next_storage_key_and_next_child_storage_key_work() { + // TODO also test on mem-tree-db let child_info = ChildInfo::new_default(b"MyChild"); let child_info = &child_info; // Contains values - let mut root_1 = H256::default(); - // Contains child trie - let mut root_2 = H256::default(); - let mut mdb = PrefixedMemoryDB::::default(); - { - let mut trie = TrieDBMutBuilder::new(&mut mdb, &mut root_1).build(); - trie.insert(b"3", &[1]).expect("insert failed"); - trie.insert(b"4", &[1]).expect("insert failed"); - trie.insert(b"6", &[1]).expect("insert failed"); - } - { - let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); - // reuse of root_1 implicitly assert child trie root is same - // as top trie (contents must 
remain the same). - let mut trie = TrieDBMutBuilder::new(&mut mdb, &mut root_1).build(); - trie.insert(b"3", &[1]).expect("insert failed"); - trie.insert(b"4", &[1]).expect("insert failed"); - trie.insert(b"6", &[1]).expect("insert failed"); - } - { - let mut trie = TrieDBMutBuilder::new(&mut mdb, &mut root_2).build(); - trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref()) - .expect("insert failed"); - }; + let mut trie = TrieDBMutBuilder::new(&mdb).build(); + trie.insert(b"3", &[1]).expect("insert failed"); + trie.insert(b"4", &[1]).expect("insert failed"); + trie.insert(b"6", &[1]).expect("insert failed"); + let mut _root_1 = trie.commit().apply_to(&mut mdb); + let kdb = KeySpacedDB::new(&mdb, child_info.keyspace()); + // implicitly assert child trie root is same + // as top trie (contents must remain the same). + let mut trie = TrieDBMutBuilder::new(&kdb).build(); + trie.insert(b"3", &[1]).expect("insert failed"); + trie.insert(b"4", &[1]).expect("insert failed"); + trie.insert(b"6", &[1]).expect("insert failed"); + let commit = trie.commit_with_keyspace(child_info.keyspace()); + let root_1 = commit.apply_to(&mut mdb); - let essence_1 = - TrieBackendEssence::<_, _, LocalTrieCache<_>, sp_trie::recorder::Recorder<_>>::new( - mdb, root_1, - ); - let mdb = essence_1.backend_storage().clone(); + // Contains child trie + let mut trie = TrieDBMutBuilder::new(&mut mdb).build(); + trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref()) + .expect("insert failed"); + let root_2 = trie.commit().apply_to(&mut mdb); + + let essence_1 = TrieBackendEssence::< + _, + LocalTrieCache<_, _>, + sp_trie::recorder::Recorder<_, _>, + >::new(Box::new(mdb), root_1); let essence_1 = TrieBackend::from_essence(essence_1); assert_eq!(essence_1.next_storage_key(b"2"), Ok(Some(b"3".to_vec()))); @@ -927,10 +844,12 @@ mod test { assert_eq!(essence_1.next_storage_key(b"5"), Ok(Some(b"6".to_vec()))); assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); - let essence_2 = - TrieBackendEssence::<_, _, LocalTrieCache<_>, sp_trie::recorder::Recorder<_>>::new( - mdb, root_2, - ); + let mdb = essence_1.backend_storage().as_prefixed_mem_db().unwrap().clone(); + let essence_2 = TrieBackendEssence::< + _, + LocalTrieCache<_, DBLocation>, + sp_trie::recorder::Recorder<_, _>, + >::new(Box::new(mdb), root_2); assert_eq!(essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec()))); diff --git a/substrate/primitives/transaction-storage-proof/src/lib.rs b/substrate/primitives/transaction-storage-proof/src/lib.rs index 9d540ae68d16..ea4db39b9bf3 100644 --- a/substrate/primitives/transaction-storage-proof/src/lib.rs +++ b/substrate/primitives/transaction-storage-proof/src/lib.rs @@ -140,10 +140,9 @@ pub trait IndexedBody { pub mod registration { use super::*; use sp_runtime::traits::{Block as BlockT, One, Saturating, Zero}; - use sp_trie::TrieMut; type Hasher = sp_core::Blake2Hasher; - type TrieLayout = sp_trie::LayoutV1; + type TrieLayout = sp_trie::LayoutV1; /// Create a new inherent data provider instance for a given parent block hash. pub fn new_data_provider( @@ -197,23 +196,19 @@ pub mod registration { // Generate tries for each transaction. 
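The rewritten chunk loop that follows is a direct application of the new commit-based mutation flow, which replaces the old `&mut root` out-parameter API. A minimal standalone sketch using the same calls that appear elsewhere in this diff; the choice of `DBLocation` as the layout's location parameter is an assumption here (an in-memory DB may equally use `()`):

```rust
use sp_core::{Blake2Hasher, H256};
use sp_trie::{DBLocation, LayoutV1, PrefixedMemoryDB, TrieDBMutBuilder};

// Minimal sketch of the commit-based flow; error handling elided.
fn build_small_trie() -> H256 {
    let mut db = PrefixedMemoryDB::<Blake2Hasher>::default();
    // The builder now borrows the database and takes no `&mut root` argument.
    let mut trie =
        TrieDBMutBuilder::<LayoutV1<Blake2Hasher, DBLocation>>::new(&db).build();
    trie.insert(b"chunk", b"data").expect("insert failed");
    // `commit()` yields a changeset; `apply_to` writes it into the database
    // and returns the new root (child tries use `commit_with_keyspace`).
    trie.commit().apply_to(&mut db)
}
```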
let mut chunk_index = 0; for transaction in transactions { - let mut transaction_root = sp_trie::empty_trie_root::(); - { - let mut trie = - sp_trie::TrieDBMutBuilder::::new(&mut db, &mut transaction_root) - .build(); - let chunks = transaction.chunks(CHUNK_SIZE).map(|c| c.to_vec()); - for (index, chunk) in chunks.enumerate() { - let index = encode_index(index as u32); - trie.insert(&index, &chunk).map_err(|e| Error::Application(Box::new(e)))?; - if chunk_index == target_chunk_index { - target_chunk = Some(chunk); - target_chunk_key = index; - } - chunk_index += 1; + let mut trie = sp_trie::TrieDBMutBuilder::::new(&mut db).build(); + let chunks = transaction.chunks(CHUNK_SIZE).map(|c| c.to_vec()); + for (index, chunk) in chunks.enumerate() { + let index = encode_index(index as u32); + trie.insert(&index, &chunk).map_err(|e| Error::Application(Box::new(e)))?; + if chunk_index == target_chunk_index { + target_chunk = Some(chunk); + target_chunk_key = index; } - trie.commit(); + chunk_index += 1; } + let transaction_root = trie.commit().apply_to(&mut db); + if target_chunk.is_some() && target_root == Default::default() { target_root = transaction_root; chunk_proof = sp_trie::generate_trie_proof::( diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 16d3ca19a179..8ea22b813e1b 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -23,17 +23,14 @@ harness = false [dependencies] ahash = { version = "0.8.2", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -hash-db = { version = "0.16.0", default-features = false } lazy_static = { version = "1.4.0", optional = true } -memory-db = { version = "0.32.0", default-features = false } nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.8", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } thiserror = { optional = true, workspace = true } tracing = { version = "0.1.29", optional = true } -trie-db = { version = "0.28.0", default-features = false } -trie-root = { version = "0.18.0", default-features = false } +trie-db = { package = "subtrie", version = "0.0.1", default-features = false } sp-core = { path = "../core", default-features = false } sp-std = { path = "../std", default-features = false } sp-externalities = { path = "../externalities", default-features = false } @@ -41,9 +38,8 @@ schnellru = { version = "0.2.1", optional = true } [dev-dependencies] array-bytes = "6.1" -criterion = "0.4.0" -trie-bench = "0.38.0" -trie-standardmap = "0.16.0" +criterion = "0.5.1" +trie-db = { package = "subtrie", version = "0.0.1", features = ["bench"] } sp-runtime = { path = "../runtime" } [features] @@ -51,9 +47,7 @@ default = ["std"] std = [ "ahash", "codec/std", - "hash-db/std", "lazy_static", - "memory-db/std", "nohash-hasher", "parking_lot", "rand", @@ -66,5 +60,4 @@ std = [ "thiserror", "tracing", "trie-db/std", - "trie-root/std", ] diff --git a/substrate/primitives/trie/benches/bench.rs b/substrate/primitives/trie/benches/bench.rs index 35aa0b808930..9bf58194ab5d 100644 --- a/substrate/primitives/trie/benches/bench.rs +++ b/substrate/primitives/trie/benches/bench.rs @@ -20,12 +20,12 @@ criterion_group!(benches, benchmark); criterion_main!(benches); fn benchmark(c: &mut Criterion) { - trie_bench::standard_benchmark::< - sp_trie::LayoutV1, + trie_db::bench::standard_benchmark::< + 
sp_trie::LayoutV1, sp_trie::TrieStream, >(c, "substrate-blake2"); - trie_bench::standard_benchmark::< - sp_trie::LayoutV1, + trie_db::bench::standard_benchmark::< + sp_trie::LayoutV1, sp_trie::TrieStream, >(c, "substrate-keccak"); } diff --git a/substrate/primitives/trie/src/cache/mod.rs b/substrate/primitives/trie/src/cache/mod.rs index 01f08a78adcf..0c3682102fcc 100644 --- a/substrate/primitives/trie/src/cache/mod.rs +++ b/substrate/primitives/trie/src/cache/mod.rs @@ -35,20 +35,19 @@ //! [`LocalTrieCache`] the actual memory usage could be above the allowed maximum. use crate::{Error, NodeCodec}; -use hash_db::Hasher; use nohash_hasher::BuildNoHashHasher; use parking_lot::{Mutex, MutexGuard}; use schnellru::LruMap; use shared_cache::{ValueCacheKey, ValueCacheRef}; use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, sync::{ atomic::{AtomicU64, Ordering}, Arc, }, time::Duration, }; -use trie_db::{node::NodeOwned, CachedValue}; +use trie_db::{node::NodeOwned, node_db::Hasher, CachedValue}; mod shared_cache; @@ -124,11 +123,11 @@ pub struct LocalNodeCacheLimiter { current_heap_size: usize, } -impl schnellru::Limiter> for LocalNodeCacheLimiter +impl schnellru::Limiter<(H, L), NodeCached> for LocalNodeCacheLimiter where H: AsRef<[u8]> + std::fmt::Debug, { - type KeyToInsert<'a> = H; + type KeyToInsert<'a> = (H, L); type LinkType = u32; #[inline] @@ -146,9 +145,9 @@ where fn on_insert<'a>( &mut self, _length: usize, - key: H, - cached_node: NodeCached, - ) -> Option<(H, NodeCached)> { + key: (H, L), + cached_node: NodeCached, + ) -> Option<((H, L), NodeCached)> { self.current_heap_size += cached_node.heap_size(); Some((key, cached_node)) } @@ -157,19 +156,18 @@ where fn on_replace( &mut self, _length: usize, - _old_key: &mut H, - _new_key: H, - old_node: &mut NodeCached, - new_node: &mut NodeCached, + _old_key: &mut (H, L), + _new_key: (H, L), + old_node: &mut NodeCached, + new_node: &mut NodeCached, ) -> bool { - debug_assert_eq!(_old_key.as_ref().len(), _new_key.as_ref().len()); self.current_heap_size = self.current_heap_size + new_node.heap_size() - old_node.heap_size(); true } #[inline] - fn on_removed(&mut self, _key: &mut H, cached_node: &mut NodeCached) { + fn on_removed(&mut self, _key: &mut (H, L), cached_node: &mut NodeCached) { self.current_heap_size -= cached_node.heap_size(); } @@ -193,7 +191,7 @@ pub struct LocalValueCacheLimiter { current_heap_size: usize, } -impl schnellru::Limiter, CachedValue> for LocalValueCacheLimiter +impl schnellru::Limiter, CachedValue> for LocalValueCacheLimiter where H: AsRef<[u8]>, { @@ -216,8 +214,8 @@ where &mut self, _length: usize, key: Self::KeyToInsert<'_>, - value: CachedValue, - ) -> Option<(ValueCacheKey, CachedValue)> { + value: CachedValue, + ) -> Option<(ValueCacheKey, CachedValue)> { self.current_heap_size += key.storage_key.len(); Some((key.into(), value)) } @@ -228,15 +226,15 @@ where _length: usize, _old_key: &mut ValueCacheKey, _new_key: ValueCacheRef, - _old_value: &mut CachedValue, - _new_value: &mut CachedValue, + _old_value: &mut CachedValue, + _new_value: &mut CachedValue, ) -> bool { debug_assert_eq!(_old_key.storage_key.len(), _new_key.storage_key.len()); true } #[inline] - fn on_removed(&mut self, key: &mut ValueCacheKey, _: &mut CachedValue) { + fn on_removed(&mut self, key: &mut ValueCacheKey, _: &mut CachedValue) { self.current_heap_size -= key.storage_key.len(); } @@ -293,25 +291,26 @@ struct TrieHitStats { } /// An internal struct to store the cached trie nodes. 
-pub(crate) struct NodeCached { +pub(crate) struct NodeCached { /// The cached node. - pub node: NodeOwned, + pub node: NodeOwned, /// Whether this node was fetched from the shared cache or not. pub is_from_shared_cache: bool, } -impl NodeCached { +impl NodeCached { /// Returns the number of bytes allocated on the heap by this node. fn heap_size(&self) -> usize { - self.node.size_in_bytes() - std::mem::size_of::>() + self.node.size_in_bytes() - std::mem::size_of::>() } } -type NodeCacheMap = LruMap, LocalNodeCacheLimiter, schnellru::RandomState>; +type NodeCacheMap = + LruMap<(H, L), NodeCached, LocalNodeCacheLimiter, schnellru::RandomState>; -type ValueCacheMap = LruMap< +type ValueCacheMap = LruMap< ValueCacheKey, - CachedValue, + CachedValue, LocalValueCacheLimiter, BuildNoHashHasher>, >; @@ -329,15 +328,15 @@ type ValueAccessSet = /// When using [`Self::as_trie_db_cache`] or [`Self::as_trie_db_mut_cache`], it will lock Mutexes. /// So, it is important that these methods are not called multiple times, because they otherwise /// deadlock. -pub struct LocalTrieCache { +pub struct LocalTrieCache { /// The shared trie cache that created this instance. - shared: SharedTrieCache, + shared: SharedTrieCache, /// The local cache for the trie nodes. - node_cache: Mutex>, + node_cache: Mutex>, /// The local cache for the values. - value_cache: Mutex>, + value_cache: Mutex>, /// Keeps track of all values accessed in the shared cache. /// @@ -349,14 +348,16 @@ pub struct LocalTrieCache { /// cache for a given key. shared_value_cache_access: Mutex, + new_nodes: Mutex>, + stats: TrieHitStats, } -impl LocalTrieCache { +impl LocalTrieCache { /// Return self as a [`TrieDB`](trie_db::TrieDB) compatible cache. /// /// The given `storage_root` needs to be the storage root of the trie this cache is used for. - pub fn as_trie_db_cache(&self, storage_root: H::Out) -> TrieCache<'_, H> { + pub fn as_trie_db_cache(&self, storage_root: H::Out) -> TrieCache<'_, H, L> { let value_cache = ValueCache::ForStorageRoot { storage_root, local_value_cache: self.value_cache.lock(), @@ -367,6 +368,7 @@ impl LocalTrieCache { TrieCache { shared_cache: self.shared.clone(), local_cache: self.node_cache.lock(), + new_nodes: self.new_nodes.lock(), value_cache, stats: &self.stats, } @@ -379,17 +381,20 @@ impl LocalTrieCache { /// cache instance. If the function is not called, cached data is just thrown away and not /// propagated to the shared cache. So, accessing these new items will be slower, but nothing /// would break because of this. - pub fn as_trie_db_mut_cache(&self) -> TrieCache<'_, H> { + pub fn as_trie_db_mut_cache(&self) -> TrieCache<'_, H, L> { TrieCache { shared_cache: self.shared.clone(), local_cache: self.node_cache.lock(), + new_nodes: self.new_nodes.lock(), value_cache: ValueCache::Fresh(Default::default()), stats: &self.stats, } } } -impl Drop for LocalTrieCache { +impl Drop + for LocalTrieCache +{ fn drop(&mut self) { tracing::debug!( target: LOG_TARGET, @@ -414,7 +419,9 @@ impl Drop for LocalTrieCache { }, }; - shared_inner.node_cache_mut().update(self.node_cache.get_mut().drain()); + shared_inner + .node_cache_mut() + .update(self.node_cache.get_mut().drain(), self.new_nodes.get_mut().drain()); shared_inner.value_cache_mut().update( self.value_cache.get_mut().drain(), @@ -424,30 +431,30 @@ impl Drop for LocalTrieCache { } /// The abstraction of the value cache for the [`TrieCache`]. 
-enum ValueCache<'a, H: Hasher> { +enum ValueCache<'a, H: Hasher, L> { /// The value cache is fresh, aka not yet associated to any storage root. /// This is used for example when a new trie is being build, to cache new values. - Fresh(HashMap, CachedValue>), + Fresh(HashMap, CachedValue>), /// The value cache is already bound to a specific storage root. ForStorageRoot { shared_value_cache_access: MutexGuard<'a, ValueAccessSet>, - local_value_cache: MutexGuard<'a, ValueCacheMap>, + local_value_cache: MutexGuard<'a, ValueCacheMap>, storage_root: H::Out, // The shared value cache needs to be temporarily locked when reading from it // so we need to clone the value that is returned, but we need to be able to // return a reference to the value, so we just buffer it here. - buffered_value: Option>, + buffered_value: Option>, }, } -impl ValueCache<'_, H> { +impl ValueCache<'_, H, L> { /// Get the value for the given `key`. fn get( &mut self, key: &[u8], - shared_cache: &SharedTrieCache, + shared_cache: &SharedTrieCache, stats: &HitStats, - ) -> Option<&CachedValue> { + ) -> Option<&CachedValue> { stats.local_fetch_attempts.fetch_add(1, Ordering::Relaxed); match self { @@ -497,7 +504,7 @@ impl ValueCache<'_, H> { } /// Insert some new `value` under the given `key`. - fn insert(&mut self, key: &[u8], value: CachedValue) { + fn insert(&mut self, key: &[u8], value: CachedValue) { match self { Self::Fresh(map) => { map.insert(key.into(), value); @@ -514,21 +521,22 @@ impl ValueCache<'_, H> { /// If this instance was created for using it with a [`TrieDBMut`](trie_db::TrieDBMut), it needs to /// be merged back into the [`LocalTrieCache`] with [`Self::merge_into`] after all operations are /// done. -pub struct TrieCache<'a, H: Hasher> { - shared_cache: SharedTrieCache, - local_cache: MutexGuard<'a, NodeCacheMap>, - value_cache: ValueCache<'a, H>, +pub struct TrieCache<'a, H: Hasher, L> { + shared_cache: SharedTrieCache, + local_cache: MutexGuard<'a, NodeCacheMap>, + value_cache: ValueCache<'a, H, L>, + new_nodes: MutexGuard<'a, HashSet>, stats: &'a TrieHitStats, } -impl<'a, H: Hasher> TrieCache<'a, H> { +impl<'a, H: Hasher, L: Copy + Default + Eq + PartialEq + std::hash::Hash> TrieCache<'a, H, L> { /// Merge this cache into the given [`LocalTrieCache`]. /// /// This function is only required to be called when this instance was created through /// [`LocalTrieCache::as_trie_db_mut_cache`], otherwise this method is a no-op. The given /// `storage_root` is the new storage root that was obtained after finishing all operations /// using the [`TrieDBMut`](trie_db::TrieDBMut). 
- pub fn merge_into(self, local: &LocalTrieCache, storage_root: H::Out) { + pub fn merge_into(self, local: &LocalTrieCache, storage_root: H::Out) { let ValueCache::Fresh(cache) = self.value_cache else { return }; if !cache.is_empty() { @@ -544,33 +552,40 @@ impl<'a, H: Hasher> TrieCache<'a, H> { } } -impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { +impl<'a, H: Hasher, L: Copy + Default + Eq + PartialEq + std::hash::Hash> + trie_db::TrieCache, L> for TrieCache<'a, H, L> +{ fn get_or_insert_node( &mut self, hash: H::Out, - fetch_node: &mut dyn FnMut() -> trie_db::Result, H::Out, Error>, - ) -> trie_db::Result<&NodeOwned, H::Out, Error> { + location: L, + fetch_node: &mut dyn FnMut() + -> trie_db::Result, H::Out, Error>, + ) -> trie_db::Result<&NodeOwned, H::Out, Error> { let mut is_local_cache_hit = true; self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed); // First try to grab the node from the local cache. - let node = self.local_cache.get_or_insert_fallible(hash, || { + let node = self.local_cache.get_or_insert_fallible((hash, location), || { is_local_cache_hit = false; // It was not in the local cache; try the shared cache. self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed); - if let Some(node) = self.shared_cache.peek_node(&hash) { + if let Some(node) = self.shared_cache.peek_node(&hash, location) { self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed); tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache"); - return Ok(NodeCached:: { node: node.clone(), is_from_shared_cache: true }) + return Ok(NodeCached:: { + node: node.clone(), + is_from_shared_cache: true, + }) } // It was not in the shared cache; try fetching it from the database. match fetch_node() { Ok(node) => { tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database"); - Ok(NodeCached:: { node, is_from_shared_cache: false }) + Ok(NodeCached:: { node, is_from_shared_cache: false }) }, Err(error) => { tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database failed"); @@ -589,21 +604,21 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { .node) } - fn get_node(&mut self, hash: &H::Out) -> Option<&NodeOwned> { + fn get_node(&mut self, hash: &H::Out, location: L) -> Option<&NodeOwned> { let mut is_local_cache_hit = true; self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed); // First try to grab the node from the local cache. - let cached_node = self.local_cache.get_or_insert_fallible(*hash, || { + let cached_node = self.local_cache.get_or_insert_fallible((*hash, location), || { is_local_cache_hit = false; // It was not in the local cache; try the shared cache. 
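Both `get_or_insert_node` and `get_node` below now run the same three-level cascade, keyed by `(hash, location)`: the local LRU first, then the shared cache, then the database callback. A toy model of just that control flow, with plain `HashMap`s standing in for the real `LruMap` and `SharedTrieCache`:

```rust
use std::{collections::HashMap, hash::Hash};

// Toy model of the lookup cascade; `N` stands in for `NodeOwned`.
fn lookup<H, L, N>(
    local: &mut HashMap<(H, L), N>,
    shared: &HashMap<(H, L), N>,
    fetch_from_db: impl FnOnce() -> N,
    hash: H,
    location: L,
) -> N
where
    H: Copy + Eq + Hash,
    L: Copy + Eq + Hash,
    N: Clone,
{
    local
        .entry((hash, location))
        // Local miss: consult the shared map, then fall back to the database.
        .or_insert_with(|| {
            shared.get(&(hash, location)).cloned().unwrap_or_else(fetch_from_db)
        })
        .clone()
}
```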
self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed); - if let Some(node) = self.shared_cache.peek_node(&hash) { + if let Some(node) = self.shared_cache.peek_node(hash, location) { self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed); tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache"); - Ok(NodeCached:: { node: node.clone(), is_from_shared_cache: true }) + Ok(NodeCached:: { node: node.clone(), is_from_shared_cache: true }) } else { tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from cache failed"); @@ -627,7 +642,7 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { } } - fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&CachedValue> { + fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&CachedValue> { let res = self.value_cache.get(key, &self.shared_cache, &self.stats.value_cache); tracing::trace!( @@ -640,7 +655,7 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { res } - fn cache_value_for_key(&mut self, key: &[u8], data: CachedValue) { + fn cache_value_for_key(&mut self, key: &[u8], data: CachedValue) { tracing::trace!( target: LOG_TARGET, key = ?sp_core::hexdisplay::HexDisplay::from(&key), @@ -649,17 +664,21 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { self.value_cache.insert(key, data); } + + fn insert_new_node(&mut self, hash: &H::Out) { + self.new_nodes.insert(*hash); + } } #[cfg(test)] mod tests { use super::*; - use trie_db::{Bytes, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut}; + use trie_db::{Bytes, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash}; type MemoryDB = crate::MemoryDB; - type Layout = crate::LayoutV1; - type Cache = super::SharedTrieCache; - type Recorder = crate::recorder::Recorder; + type Layout = crate::LayoutV1; + type Cache = super::SharedTrieCache; + type Recorder = crate::recorder::Recorder; const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", b"val1"), (b"key2", &[2; 64]), (b"key3", b"val3"), (b"key4", &[4; 64])]; @@ -668,15 +687,11 @@ mod tests { fn create_trie() -> (MemoryDB, TrieHash) { let mut db = MemoryDB::default(); - let mut root = Default::default(); - - { - let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); - for (k, v) in TEST_DATA { - trie.insert(k, v).expect("Inserts data"); - } + let mut trie = TrieDBMutBuilder::::new(&db).build(); + for (k, v) in TEST_DATA { + trie.insert(k, v).expect("Inserts data"); } - + let root = trie.commit().apply_to(&mut db); (db, root) } @@ -736,23 +751,24 @@ mod tests { let new_value = vec![23; 64]; let shared_cache = Cache::new(CACHE_SIZE); - let mut new_root = root; - { + let new_root = { let local_cache = shared_cache.local_cache(); let mut cache = local_cache.as_trie_db_mut_cache(); - { - let mut trie = TrieDBMutBuilder::::from_existing(&mut db, &mut new_root) - .with_cache(&mut cache) - .build(); + let mut trie = TrieDBMutBuilder::::from_existing(&mut db, root) + .with_cache(&mut cache) + .build(); - trie.insert(&new_key, &new_value).unwrap(); - } + trie.insert(&new_key, &new_value).unwrap(); + let new_root = trie.commit().apply_to(&mut db); + let trie = TrieDBBuilder::::new(&db, &new_root).with_cache(&mut cache).build(); + trie.get(&new_key).unwrap().unwrap(); cache.merge_into(&local_cache, new_root); - } + new_root + }; // After the local cache is dropped, all changes should have been merged back to the shared // cache. 
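For reference, the lifecycle this test exercises: a `LocalTrieCache` buffers node and value accesses, `merge_into` rebinds a fresh value cache to its final storage root, and dropping the local cache flushes everything into the shared instance. A sketch, assuming `DBLocation` as the location parameter as in the surrounding tests:

```rust
use sp_core::{Blake2Hasher, H256};
use sp_trie::{cache::SharedTrieCache, DBLocation};

// Illustrative lifecycle; a TrieDBMut built with `.with_cache(&mut cache)`
// would populate the fresh cache between the two steps shown here.
fn flush_fresh_cache(shared: &SharedTrieCache<Blake2Hasher, DBLocation>, new_root: H256) {
    let local = shared.local_cache();
    let cache = local.as_trie_db_mut_cache();
    // Rebind the fresh value cache to the storage root it belongs to ...
    cache.merge_into(&local, new_root);
    // ... then dropping the local cache merges its contents into `shared`.
    drop(local);
}
```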
@@ -828,13 +844,12 @@ mod tests { let recorder = Recorder::default(); let local_cache = shared_cache.local_cache(); - let mut new_root = root; - { + let new_root = { let mut db = db.clone(); let mut cache = local_cache.as_trie_db_cache(root); let mut recorder = recorder.as_trie_recorder(root); - let mut trie = TrieDBMutBuilder::::from_existing(&mut db, &mut new_root) + let mut trie = TrieDBMutBuilder::::from_existing(&mut db, root) .with_cache(&mut cache) .with_recorder(&mut recorder) .build(); @@ -842,21 +857,18 @@ mod tests { for (key, value) in DATA_TO_ADD { trie.insert(key, value).unwrap(); } - } + trie.commit().root_hash() + }; let storage_proof = recorder.drain_storage_proof(); let mut memory_db: MemoryDB = storage_proof.into_memory_db(); - let mut proof_root = root; - { - let mut trie = - TrieDBMutBuilder::::from_existing(&mut memory_db, &mut proof_root) - .build(); + let mut trie = TrieDBMutBuilder::::from_existing(&mut memory_db, root).build(); - for (key, value) in DATA_TO_ADD { - trie.insert(key, value).unwrap(); - } + for (key, value) in DATA_TO_ADD { + trie.insert(key, value).unwrap(); } + let proof_root = trie.commit().root_hash(); assert_eq!(new_root, proof_root) } @@ -958,22 +970,18 @@ mod tests { { let local_cache = shared_cache.local_cache(); - let mut new_root = root; - { let mut cache = local_cache.as_trie_db_cache(root); - { - let mut trie = - TrieDBMutBuilder::::from_existing(&mut db, &mut new_root) - .with_cache(&mut cache) - .build(); - - let value = vec![10u8; 100]; - // Ensure we add enough data that would overflow the cache. - for i in 0..CACHE_SIZE_RAW / 100 * 2 { - trie.insert(format!("key{}", i).as_bytes(), &value).unwrap(); - } + let mut trie = TrieDBMutBuilder::::from_existing(&mut db, root) + .with_cache(&mut cache) + .build(); + + let value = vec![10u8; 100]; + // Ensure we add enough data that would overflow the cache. + for i in 0..CACHE_SIZE_RAW / 100 * 2 { + trie.insert(format!("key{}", i).as_bytes(), &value).unwrap(); } + let new_root = trie.commit().root_hash(); cache.merge_into(&local_cache, new_root); } diff --git a/substrate/primitives/trie/src/cache/shared_cache.rs b/substrate/primitives/trie/src/cache/shared_cache.rs index e3ba94a2af7c..9ac6a764efa6 100644 --- a/substrate/primitives/trie/src/cache/shared_cache.rs +++ b/substrate/primitives/trie/src/cache/shared_cache.rs @@ -18,7 +18,6 @@ ///! Provides the [`SharedNodeCache`], the [`SharedValueCache`] and the [`SharedTrieCache`] ///! that combines both caches and is exported to the outside. use super::{CacheSize, NodeCached}; -use hash_db::Hasher; use nohash_hasher::BuildNoHashHasher; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use schnellru::LruMap; @@ -27,7 +26,7 @@ use std::{ hash::{BuildHasher, Hasher as _}, sync::Arc, }; -use trie_db::{node::NodeOwned, CachedValue}; +use trie_db::{node::NodeOwned, node_db::Hasher, CachedValue}; lazy_static::lazy_static! 
{ static ref RANDOM_STATE: ahash::RandomState = { @@ -62,7 +61,7 @@ pub struct SharedNodeCacheLimiter { max_items_evicted: usize, } -impl schnellru::Limiter> for SharedNodeCacheLimiter +impl schnellru::Limiter, L)> for SharedNodeCacheLimiter where H: AsRef<[u8]>, { @@ -82,9 +81,9 @@ where &mut self, _length: usize, key: Self::KeyToInsert<'_>, - node: NodeOwned, - ) -> Option<(H, NodeOwned)> { - let new_item_heap_size = node.size_in_bytes() - std::mem::size_of::>(); + node: (NodeOwned, L), + ) -> Option<(H, (NodeOwned, L))> { + let new_item_heap_size = node.0.size_in_bytes() - std::mem::size_of::>(); if new_item_heap_size > self.max_heap_size { // Item's too big to add even if the cache's empty; bail. return None @@ -100,18 +99,18 @@ where _length: usize, _old_key: &mut H, _new_key: H, - old_node: &mut NodeOwned, - new_node: &mut NodeOwned, + old_node: &mut (NodeOwned, L), + new_node: &mut (NodeOwned, L), ) -> bool { - debug_assert_eq!(_old_key.as_ref(), _new_key.as_ref()); - - let new_item_heap_size = new_node.size_in_bytes() - std::mem::size_of::>(); + let new_item_heap_size = + new_node.0.size_in_bytes() - std::mem::size_of::>(); if new_item_heap_size > self.max_heap_size { // Item's too big to add even if the cache's empty; bail. return false } - let old_item_heap_size = old_node.size_in_bytes() - std::mem::size_of::>(); + let old_item_heap_size = + old_node.0.size_in_bytes() - std::mem::size_of::>(); self.heap_size = self.heap_size - old_item_heap_size + new_item_heap_size; true } @@ -122,8 +121,8 @@ where } #[inline] - fn on_removed(&mut self, _: &mut H, node: &mut NodeOwned) { - self.heap_size -= node.size_in_bytes() - std::mem::size_of::>(); + fn on_removed(&mut self, _: &mut H, node: &mut (NodeOwned, L)) { + self.heap_size -= node.0.size_in_bytes() - std::mem::size_of::>(); self.items_evicted += 1; } @@ -161,7 +160,7 @@ pub struct SharedValueCacheLimiter { max_items_evicted: usize, } -impl schnellru::Limiter, CachedValue> for SharedValueCacheLimiter +impl schnellru::Limiter, CachedValue> for SharedValueCacheLimiter where H: AsRef<[u8]>, { @@ -178,8 +177,8 @@ where &mut self, _length: usize, mut key: Self::KeyToInsert<'_>, - value: CachedValue, - ) -> Option<(ValueCacheKey, CachedValue)> { + value: CachedValue, + ) -> Option<(ValueCacheKey, CachedValue)> { match self.known_storage_keys.entry(key.storage_key.clone()) { SetEntry::Vacant(entry) => { let new_item_heap_size = key.storage_key.len(); @@ -205,15 +204,15 @@ where _length: usize, _old_key: &mut ValueCacheKey, _new_key: ValueCacheKey, - _old_value: &mut CachedValue, - _new_value: &mut CachedValue, + _old_value: &mut CachedValue, + _new_value: &mut CachedValue, ) -> bool { debug_assert_eq!(_new_key.storage_key, _old_key.storage_key); true } #[inline] - fn on_removed(&mut self, key: &mut ValueCacheKey, _: &mut CachedValue) { + fn on_removed(&mut self, key: &mut ValueCacheKey, _: &mut CachedValue) { if Arc::strong_count(&key.storage_key) == 2 { // There are only two instances of this key: // 1) one memoized in `known_storage_keys`, @@ -238,22 +237,24 @@ where } } -type SharedNodeCacheMap = - LruMap, SharedNodeCacheLimiter, schnellru::RandomState>; +type SharedNodeCacheMap = + LruMap, L), SharedNodeCacheLimiter, schnellru::RandomState>; /// The shared node cache. /// /// Internally this stores all cached nodes in a [`LruMap`]. It ensures that when updating the /// cache, that the cache stays within its allowed bounds. 
-pub(super) struct SharedNodeCache +pub(super) struct SharedNodeCache where H: AsRef<[u8]>, { /// The cached nodes, ordered by least recently used. - pub(super) lru: SharedNodeCacheMap, + pub(super) lru: SharedNodeCacheMap, } -impl + Eq + std::hash::Hash> SharedNodeCache { +impl + Eq + std::hash::Hash, L: Copy + Default + Eq + PartialEq> + SharedNodeCache +{ /// Create a new instance. fn new(max_inline_size: usize, max_heap_size: usize) -> Self { Self { @@ -268,7 +269,11 @@ impl + Eq + std::hash::Hash> SharedNodeCache { } /// Update the cache with the `list` of nodes which were either newly added or accessed. - pub fn update(&mut self, list: impl IntoIterator)>) { + pub fn update( + &mut self, + list: impl IntoIterator)>, + new_nodes: impl IntoIterator, + ) { let mut access_count = 0; let mut add_count = 0; @@ -276,7 +281,7 @@ impl + Eq + std::hash::Hash> SharedNodeCache { self.lru.limiter_mut().max_items_evicted = self.lru.len() * 100 / super::SHARED_NODE_CACHE_MAX_REPLACE_PERCENT; - for (key, cached_node) in list { + for ((key, location), cached_node) in list { if cached_node.is_from_shared_cache { if self.lru.get(&key).is_some() { access_count += 1; @@ -290,7 +295,7 @@ impl + Eq + std::hash::Hash> SharedNodeCache { } } - self.lru.insert(key, cached_node.node); + self.lru.insert(key, (cached_node.node, location)); add_count += 1; if self.lru.limiter().items_evicted > self.lru.limiter().max_items_evicted { @@ -299,11 +304,19 @@ impl + Eq + std::hash::Hash> SharedNodeCache { } } + let mut removed_new = 0; + for key in new_nodes { + if self.lru.remove(&key).is_some() { + removed_new += 1; + } + } + tracing::debug!( target: super::LOG_TARGET, - "Updated the shared node cache: {} accesses, {} new values, {}/{} evicted (length = {}, inline size={}/{}, heap size={}/{})", + "Updated the shared node cache: {} accesses, {} new values, {} cleaned, {}/{} evicted (length = {}, inline size={}/{}, heap size={}/{})", access_count, add_count, + removed_new, self.lru.limiter().items_evicted, self.lru.limiter().max_items_evicted, self.lru.len(), @@ -464,9 +477,9 @@ impl PartialEq for ValueCacheKey { } } -type SharedValueCacheMap = schnellru::LruMap< +type SharedValueCacheMap = schnellru::LruMap< ValueCacheKey, - CachedValue, + CachedValue, SharedValueCacheLimiter, BuildNoHashHasher>, >; @@ -474,15 +487,15 @@ type SharedValueCacheMap = schnellru::LruMap< /// The shared value cache. /// /// The cache ensures that it stays in the configured size bounds. -pub(super) struct SharedValueCache +pub(super) struct SharedValueCache where H: AsRef<[u8]>, { /// The cached nodes, ordered by least recently used. - pub(super) lru: SharedValueCacheMap, + pub(super) lru: SharedValueCacheMap, } -impl> SharedValueCache { +impl, L> SharedValueCache { /// Create a new instance. fn new(max_inline_size: usize, max_heap_size: usize) -> Self { Self { @@ -509,7 +522,7 @@ impl> SharedValueCache { /// `added` ones. pub fn update( &mut self, - added: impl IntoIterator, CachedValue)>, + added: impl IntoIterator, CachedValue)>, accessed: impl IntoIterator, ) { let mut access_count = 0; @@ -568,31 +581,31 @@ impl> SharedValueCache { } /// The inner of [`SharedTrieCache`]. -pub(super) struct SharedTrieCacheInner { - node_cache: SharedNodeCache, - value_cache: SharedValueCache, +pub(super) struct SharedTrieCacheInner { + node_cache: SharedNodeCache, + value_cache: SharedValueCache, } -impl SharedTrieCacheInner { +impl SharedTrieCacheInner { /// Returns a reference to the [`SharedValueCache`].
#[cfg(test)] - pub(super) fn value_cache(&self) -> &SharedValueCache { + pub(super) fn value_cache(&self) -> &SharedValueCache { &self.value_cache } /// Returns a mutable reference to the [`SharedValueCache`]. - pub(super) fn value_cache_mut(&mut self) -> &mut SharedValueCache { + pub(super) fn value_cache_mut(&mut self) -> &mut SharedValueCache { &mut self.value_cache } /// Returns a reference to the [`SharedNodeCache`]. #[cfg(test)] - pub(super) fn node_cache(&self) -> &SharedNodeCache { + pub(super) fn node_cache(&self) -> &SharedNodeCache { &self.node_cache } /// Returns a mutable reference to the [`SharedNodeCache`]. - pub(super) fn node_cache_mut(&mut self) -> &mut SharedNodeCache { + pub(super) fn node_cache_mut(&mut self) -> &mut SharedNodeCache { &mut self.node_cache } } @@ -604,17 +617,17 @@ impl SharedTrieCacheInner { /// bounds given via the [`CacheSize`] at startup. /// /// The instance of this object can be shared between multiple threads. -pub struct SharedTrieCache { - inner: Arc>>, +pub struct SharedTrieCache { + inner: Arc>>, } -impl Clone for SharedTrieCache { +impl Clone for SharedTrieCache { fn clone(&self) -> Self { Self { inner: self.inner.clone() } } } -impl SharedTrieCache { +impl SharedTrieCache { /// Create a new [`SharedTrieCache`]. pub fn new(cache_size: CacheSize) -> Self { let total_budget = cache_size.0; @@ -630,12 +643,14 @@ impl SharedTrieCache { // Calculate how much memory the maps will be allowed to hold inline given our budget. let value_cache_max_inline_size = - SharedValueCacheMap::::memory_usage_for_memory_budget( + SharedValueCacheMap::::memory_usage_for_memory_budget( value_cache_inline_budget, ); let node_cache_max_inline_size = - SharedNodeCacheMap::::memory_usage_for_memory_budget(node_cache_inline_budget); + SharedNodeCacheMap::::memory_usage_for_memory_budget( + node_cache_inline_budget, + ); // And this is how much data we'll at most keep on the heap for each cache. let value_cache_max_heap_size = value_cache_budget - value_cache_max_inline_size; @@ -666,11 +681,12 @@ impl SharedTrieCache { } /// Create a new [`LocalTrieCache`](super::LocalTrieCache) instance from this shared cache. - pub fn local_cache(&self) -> super::LocalTrieCache { + pub fn local_cache(&self) -> super::LocalTrieCache { super::LocalTrieCache { shared: self.clone(), node_cache: Default::default(), value_cache: Default::default(), + new_nodes: Default::default(), shared_value_cache_access: Mutex::new(super::ValueAccessSet::with_hasher( schnellru::ByLength::new(super::SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS), Default::default(), @@ -685,8 +701,16 @@ impl SharedTrieCache { /// /// This doesn't change the least recently order in the internal [`LruMap`]. #[inline] - pub fn peek_node(&self, key: &H::Out) -> Option> { - self.inner.read().node_cache.lru.peek(key).cloned() + pub fn peek_node(&self, key: &H::Out, location: L) -> Option> { + if let Some((val, loc)) = self.inner.read().node_cache.lru.peek(key).cloned() { + if loc == location { + Some(val) + } else { + None + } + } else { + None + } } /// Get a copy of the [`CachedValue`] for `key`. 
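`peek_node` is now location-aware: a node cached under the requested hash but a different database location is treated as a miss rather than returned. A small usage sketch, again assuming `DBLocation` as the location parameter:

```rust
use sp_core::{Blake2Hasher, H256};
use sp_trie::{cache::SharedTrieCache, DBLocation};

// Sketch: a peek takes the expected DB location alongside the hash, so nodes
// cached from other DB positions are never handed out by mistake.
fn peek(shared: &SharedTrieCache<Blake2Hasher, DBLocation>, hash: H256, location: DBLocation) {
    match shared.peek_node(&hash, location) {
        Some(node) => println!("cached ({} bytes)", node.size_in_bytes()),
        None => println!("miss: absent, or cached under another location"),
    }
}
```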
@@ -699,7 +723,7 @@ impl SharedTrieCache { hash: ValueCacheKeyHash, storage_root: &H::Out, storage_key: &[u8], - ) -> Option> { + ) -> Option> { self.inner .read() .value_cache @@ -739,12 +763,14 @@ impl SharedTrieCache { #[cfg(test)] pub(super) fn read_lock_inner( &self, - ) -> parking_lot::RwLockReadGuard<'_, SharedTrieCacheInner> { + ) -> parking_lot::RwLockReadGuard<'_, SharedTrieCacheInner> { self.inner.read() } /// Returns the write locked inner. - pub(super) fn write_lock_inner(&self) -> Option>> { + pub(super) fn write_lock_inner( + &self, + ) -> Option>> { // This should never happen, but we *really* don't want to deadlock. So let's have it // timeout, just in case. At worst it'll do nothing, and at best it'll avert a catastrophe // and notify us that there's a problem. @@ -759,7 +785,7 @@ mod tests { #[test] fn shared_value_cache_works() { - let mut cache = SharedValueCache::::new(usize::MAX, 10 * 10); + let mut cache = SharedValueCache::::new(usize::MAX, 10 * 10); let key = vec![0; 10]; @@ -847,7 +873,7 @@ mod tests { assert!(matches!( cache.lru.peek(&ValueCacheKey::new_value(&[1; 10][..], root0)).unwrap(), - CachedValue::::NonExisting + CachedValue::::NonExisting )); assert!(cache.lru.peek(&ValueCacheKey::new_value(&[1; 10][..], root1)).is_none(),); diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index fd1320b3fbcb..610e45befb91 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -19,6 +19,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + #[cfg(feature = "std")] pub mod cache; mod error; @@ -35,11 +37,6 @@ pub mod proof_size_extension; /// Our `NodeCodec`-specific error. pub use error::Error; -/// Various re-exports from the `hash-db` crate. -pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; -use hash_db::{Hasher, Prefix}; -/// Various re-exports from the `memory-db` crate. -pub use memory_db::{prefixed_key, HashKey, KeyFunction, PrefixedKey}; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; use sp_std::{borrow::Borrow, boxed::Box, marker::PhantomData, vec::Vec}; @@ -47,27 +44,37 @@ pub use storage_proof::{CompactProof, StorageProof}; /// Trie codec reexport, mainly child trie support /// for trie compact proof. pub use trie_codec::{decode_compact, encode_compact, Error as CompactProofError}; +/// Various re-exports from the `memory_db` module of `trie-db` crate. +pub use trie_db::memory_db::{prefixed_key, HashKey, KeyFunction, PrefixedKey}; +/// Various re-exports from the `node_db` module of `trie-db` crate. +pub use trie_db::node_db::{NodeDB as NodeDBT, EMPTY_PREFIX}; use trie_db::proof::{generate_proof, verify_proof}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ nibble_ops, node::{NodePlan, ValuePlan}, - CError, DBValue, Query, Recorder, Trie, TrieCache, TrieConfiguration, TrieDBIterator, - TrieDBKeyIterator, TrieDBRawIterator, TrieLayout, TrieMut, TrieRecorder, + CError, Changeset, DBValue, ExistingChangesetNode, Location, NewChangesetNode, Query, Recorder, + Trie, TrieCache, TrieConfiguration, TrieDBIterator, TrieDBKeyIterator, TrieDBRawIterator, + TrieLayout, TrieRecorder, +}; +pub use trie_db::{ + node_db::{Hasher, Prefix}, + proof::VerifyError, + MerkleValue, }; -pub use trie_db::{proof::VerifyError, MerkleValue}; /// The Substrate format implementation of `TrieStream`. 
pub use trie_stream::TrieStream; /// substrate trie layout -pub struct LayoutV0(PhantomData); +pub struct LayoutV0(PhantomData<(H, DL)>); /// substrate trie layout, with external value nodes. -pub struct LayoutV1(PhantomData); +pub struct LayoutV1(PhantomData<(H, DL)>); -impl TrieLayout for LayoutV0 +impl TrieLayout for LayoutV0 where H: Hasher, + DL: Location, { const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; @@ -75,11 +82,13 @@ where type Hash = H; type Codec = NodeCodec; + type Location = DL; } -impl TrieConfiguration for LayoutV0 +impl TrieConfiguration for LayoutV0 where H: Hasher, + DL: Location, { fn trie_root(input: I) -> ::Out where @@ -87,7 +96,10 @@ where A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::trie_root_no_extension::(input, Self::MAX_INLINE_VALUE) + trie_db::trie_root::trie_root_no_extension::( + input, + Self::MAX_INLINE_VALUE, + ) } fn trie_root_unhashed(input: I) -> Vec @@ -96,7 +108,7 @@ where A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::unhashed_trie_no_extension::( + trie_db::trie_root::unhashed_trie_no_extension::( input, Self::MAX_INLINE_VALUE, ) @@ -107,9 +119,10 @@ where } } -impl TrieLayout for LayoutV1 +impl TrieLayout for LayoutV1 where H: Hasher, + DL: Location, { const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; @@ -117,11 +130,13 @@ where type Hash = H; type Codec = NodeCodec; + type Location = DL; } -impl TrieConfiguration for LayoutV1 +impl TrieConfiguration for LayoutV1 where H: Hasher, + DL: Location, { fn trie_root(input: I) -> ::Out where @@ -129,7 +144,10 @@ where A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::trie_root_no_extension::(input, Self::MAX_INLINE_VALUE) + trie_db::trie_root::trie_root_no_extension::( + input, + Self::MAX_INLINE_VALUE, + ) } fn trie_root_unhashed(input: I) -> Vec @@ -138,7 +156,7 @@ where A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::unhashed_trie_no_extension::( + trie_db::trie_root::unhashed_trie_no_extension::( input, Self::MAX_INLINE_VALUE, ) @@ -149,18 +167,25 @@ where } } +/// DB location hint for a trie node. +#[cfg(feature = "std")] +pub type DBLocation = u64; + +#[cfg(not(feature = "std"))] +pub type DBLocation = (); + /// Type that is able to provide a [`trie_db::TrieRecorder`]. /// /// Types implementing this trait can be used to maintain recorded state /// across operations on different [`trie_db::TrieDB`] instances. -pub trait TrieRecorderProvider { +pub trait TrieRecorderProvider { /// Recorder type that is going to be returned by implementors of this trait. - type Recorder<'a>: trie_db::TrieRecorder + 'a + type Recorder<'a>: trie_db::TrieRecorder + 'a where Self: 'a; /// Create a [`StorageProof`] derived from the internal state. - fn drain_storage_proof(self) -> Option; + fn drain_storage_proof(&self) -> Option; /// Provide a recorder implementing [`trie_db::TrieRecorder`]. fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_>; @@ -174,21 +199,19 @@ pub trait ProofSizeProvider { /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; -/// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub trait AsHashDB: hash_db::AsHashDB {} -impl> AsHashDB for T {} -/// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub type HashDB<'a, H> = dyn hash_db::HashDB + 'a; -/// Reexport from `hash_db`, with genericity set for `Hasher` trait. -/// This uses a `KeyFunction` for prefixing keys internally (avoiding -/// key conflict for non random keys). 
-pub type PrefixedMemoryDB = memory_db::MemoryDB, trie_db::DBValue>; -/// Reexport from `hash_db`, with genericity set for `Hasher` trait. +/// Reexport from `trie_db`, with genericity set for `Hasher` trait. +pub type NodeDB<'a, H, L> = dyn trie_db::node_db::NodeDB + 'a; +/// Reexport from `trie_db`, with genericity set for `Hasher` trait. /// This uses a noops `KeyFunction` (key addressing must be hashed or using /// an encoding scheme that avoid key conflict). -pub type MemoryDB = memory_db::MemoryDB, trie_db::DBValue>; -/// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub type GenericMemoryDB = memory_db::MemoryDB; +pub type MemoryDB = + trie_db::memory_db::MemoryDB, trie_db::DBValue>; +/// Reexport from `trie_db`, with genericity set for `Hasher` trait. +/// This uses a prefixed `KeyFunction` +pub type PrefixedMemoryDB = + trie_db::memory_db::MemoryDB, trie_db::DBValue>; +/// Reexport from `trie_db`, with genericity set for `Hasher` trait. +pub type GenericMemoryDB = trie_db::memory_db::MemoryDB; /// Persistent trie database read-access interface for the a given hasher. pub type TrieDB<'a, 'cache, L> = trie_db::TrieDB<'a, 'cache, L>; @@ -202,6 +225,9 @@ pub type TrieDBMutBuilder<'a, L> = trie_db::TrieDBMutBuilder<'a, L>; pub type Lookup<'a, 'cache, L, Q> = trie_db::Lookup<'a, 'cache, L, Q>; /// Hash type for a trie layout. pub type TrieHash = <::Hash as Hasher>::Out; +/// Change set for child trie. +pub type ChildChangeset = Box>; + /// This module is for non generic definition of trie type. /// Only the `Hasher` trait is generic in this case. pub mod trie_types { @@ -210,23 +236,31 @@ pub mod trie_types { /// Persistent trie database read-access interface for the a given hasher. /// /// Read only V1 and V0 are compatible, thus we always use V1. - pub type TrieDB<'a, 'cache, H> = super::TrieDB<'a, 'cache, LayoutV1>; + pub type TrieDB<'a, 'cache, H> = super::TrieDB<'a, 'cache, LayoutV1>; /// Builder for creating a [`TrieDB`]. - pub type TrieDBBuilder<'a, 'cache, H> = super::TrieDBBuilder<'a, 'cache, LayoutV1>; + pub type TrieDBBuilder<'a, 'cache, H> = + super::TrieDBBuilder<'a, 'cache, LayoutV1>; + /// Builder for creating a [`TrieDB`] for state V0. + pub type TrieDBBuilderV0<'a, 'cache, H, L> = super::TrieDBBuilder<'a, 'cache, LayoutV0>; + /// Builder for creating a [`TrieDB`] for state V1. + pub type TrieDBBuilderV1<'a, 'cache, H, L> = super::TrieDBBuilder<'a, 'cache, LayoutV1>; /// Persistent trie database write-access interface for the a given hasher. - pub type TrieDBMutV0<'a, H> = super::TrieDBMut<'a, LayoutV0>; + pub type TrieDBMutV0<'a, H> = super::TrieDBMut<'a, LayoutV0>; /// Builder for creating a [`TrieDBMutV0`]. - pub type TrieDBMutBuilderV0<'a, H> = super::TrieDBMutBuilder<'a, LayoutV0>; + pub type TrieDBMutBuilderV0<'a, H> = super::TrieDBMutBuilder<'a, LayoutV0>; /// Persistent trie database write-access interface for the a given hasher. - pub type TrieDBMutV1<'a, H> = super::TrieDBMut<'a, LayoutV1>; + pub type TrieDBMutV1<'a, H> = super::TrieDBMut<'a, LayoutV1>; /// Builder for creating a [`TrieDBMutV1`]. - pub type TrieDBMutBuilderV1<'a, H> = super::TrieDBMutBuilder<'a, LayoutV1>; + pub type TrieDBMutBuilderV1<'a, H> = super::TrieDBMutBuilder<'a, LayoutV1>; /// Querying interface, as in `trie_db` but less generic. - pub type Lookup<'a, 'cache, H, Q> = trie_db::Lookup<'a, 'cache, LayoutV1, Q>; + pub type Lookup<'a, 'cache, H, Q> = trie_db::Lookup<'a, 'cache, LayoutV1, Q>; /// As in `trie_db`, but less generic, error type for the crate. 
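
The builder aliases above feed the new commit-based mutation flow: `TrieDBMutBuilder` no longer threads a `&mut root` through, and changes only reach the database when the changeset returned by `commit` is applied. A minimal sketch against the in-memory backend, assuming `Blake2Hasher` from `sp_core` and the unit location used by `MemoryDB` (types as introduced in this diff):

```rust
use sp_core::Blake2Hasher;
use sp_trie::{LayoutV1, MemoryDB, Trie, TrieDBBuilder, TrieDBMutBuilder};

// Unit locations: the hash-keyed in-memory backend does not track node positions.
type L = LayoutV1<Blake2Hasher, ()>;

fn write_then_read() -> Option<Vec<u8>> {
    let mut db = MemoryDB::<Blake2Hasher>::default();
    // Mutations are buffered inside the `TrieDBMut`.
    let mut t = TrieDBMutBuilder::<L>::new(&db).build();
    t.insert(b"key", b"value").ok()?;
    // `commit` yields a changeset; applying it mutates the database and
    // returns the new root.
    let root = t.commit().apply_to(&mut db);
    TrieDBBuilder::<L>::new(&db, &root).build().get(b"key").ok().flatten()
}
```
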
pub type TrieError = trie_db::TrieError>; } +/// Alias to root hash and db location. +pub type Root = (TrieHash, ::Location); + /// Create a proof for a subset of keys in a trie. /// /// The `keys` may contain any set of keys regardless of each one of them is included @@ -244,7 +278,7 @@ where L: TrieConfiguration, I: IntoIterator, K: 'a + AsRef<[u8]>, - DB: hash_db::HashDBRef, + DB: trie_db::node_db::NodeDB, { generate_proof::<_, L, _, _>(db, &root, keys) } @@ -272,47 +306,51 @@ where } /// Determine a trie root given a hash DB and delta values. -pub fn delta_trie_root( - db: &mut DB, - mut root: TrieHash, +pub fn delta_trie_root( + db: &dyn trie_db::node_db::NodeDB, + root: Root, delta: I, - recorder: Option<&mut dyn trie_db::TrieRecorder>>, - cache: Option<&mut dyn TrieCache>, -) -> Result, Box>> + recorder: Option<&mut dyn trie_db::TrieRecorder, L::Location>>, + cache: Option<&mut dyn TrieCache>, + keyspace: Option<&[u8]>, +) -> Result, L::Location>, Box>> where - I: IntoIterator, + I: IntoIterator>)>, A: Borrow<[u8]>, B: Borrow>, V: Borrow<[u8]>, - DB: hash_db::HashDB, { - { - let mut trie = TrieDBMutBuilder::::from_existing(db, &mut root) - .with_optional_cache(cache) - .with_optional_recorder(recorder) - .build(); - - let mut delta = delta.into_iter().collect::>(); - delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow())); - - for (key, change) in delta { - match change.borrow() { - Some(val) => trie.insert(key.borrow(), val.borrow())?, - None => trie.remove(key.borrow())?, - }; - } - } + let mut trie = TrieDBMutBuilder::::from_existing_with_db_location(db, root.0, root.1) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build(); + + let mut delta = delta.into_iter().collect::>(); + delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow())); - Ok(root) + for (key, change, set) in delta { + match change.borrow() { + Some(val) => trie.insert_with_tree_ref(key.borrow(), val.borrow(), set)?, + None => trie.remove_with_tree_ref(key.borrow(), set)?, + }; + } + if let Some(ks) = keyspace { + Ok(trie.commit_with_keyspace(ks)) + } else { + Ok(trie.commit()) + } } /// Read a value from the trie. -pub fn read_trie_value>( +pub fn read_trie_value< + L: TrieLayout, + DB: trie_db::node_db::NodeDB, +>( db: &DB, root: &TrieHash, key: &[u8], - recorder: Option<&mut dyn TrieRecorder>>, - cache: Option<&mut dyn TrieCache>, + recorder: Option<&mut dyn TrieRecorder, L::Location>>, + cache: Option<&mut dyn TrieCache>, ) -> Result>, Box>> { TrieDBBuilder::::new(db, root) .with_optional_cache(cache) @@ -321,17 +359,49 @@ pub fn read_trie_value, +>( + db: &DB, + root: &TrieHash, + root_key: &[u8], + recorder: Option<&mut dyn TrieRecorder, L::Location>>, + cache: Option<&mut dyn TrieCache>, +) -> Result, L::Location)>, Box>> { + let trie = TrieDBBuilder::::new(db, root) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build(); + + let mut iter = trie_db::TrieDBNodeIterator::new(&trie)?; + use trie_db::TrieIterator; + iter.seek(root_key)?; + let Some(item) = iter.next() else { return Ok(None) }; + let item = item?; + let node = &item.2; + let location = node.node_plan().additional_ref_location(node.locations()); + let Some(root) = iter.item_from_raw(&item) else { return Ok(None) }; + let (root_key2, root) = root?; + // TODO should be a debug_assert + if root_key2.as_slice() != root_key { + return Ok(None); + } + Ok(Some((root, location.unwrap_or_default()))) +} + /// Read the [`trie_db::MerkleValue`] of the node that is the closest descendant for /// the provided key. 
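
With the reworked `delta_trie_root` above, the root travels as a `(hash, location)` pair, each delta entry carries an optional child-trie changeset, and the caller decides whether to persist the resulting `Changeset`. A hedged sketch, with the turbofish arity and item types inferred from the test usage later in this diff:

```rust
use sp_core::{Blake2Hasher, H256};
use sp_trie::{delta_trie_root, LayoutV1, MemoryDB};

type L = LayoutV1<Blake2Hasher, ()>;

fn apply_delta(
    db: &MemoryDB<Blake2Hasher>,
    root: H256,
    delta: &[(Vec<u8>, Option<Vec<u8>>)],
) -> H256 {
    delta_trie_root::<L, _, _, _, _>(
        db,
        // Root plus its database location; `()` for the in-memory backend.
        (root, Default::default()),
        delta
            .iter()
            // Trailing `None`: no child trie changeset attached to this entry.
            .map(|(k, v)| (k.as_slice(), v.as_ref().map(Vec::as_slice), None)),
        None, // recorder
        None, // cache
        None, // keyspace, only set when operating on a child trie
    )
    .expect("delta applies cleanly")
    // The changeset is dropped here; callers that persist state would
    // apply it to the database instead of only taking the root.
    .root_hash()
}
```
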
-pub fn read_trie_first_descedant_value( +pub fn read_trie_first_descendant_value( db: &DB, root: &TrieHash, key: &[u8], - recorder: Option<&mut dyn TrieRecorder>>, - cache: Option<&mut dyn TrieCache>, + recorder: Option<&mut dyn TrieRecorder, L::Location>>, + cache: Option<&mut dyn TrieCache>, ) -> Result>>, Box>> where - DB: hash_db::HashDBRef, + DB: trie_db::node_db::NodeDB, { TrieDBBuilder::::new(db, root) .with_optional_cache(cache) @@ -344,7 +414,7 @@ where pub fn read_trie_value_with< L: TrieLayout, Q: Query>, - DB: hash_db::HashDBRef, + DB: trie_db::node_db::NodeDB, >( db: &DB, root: &TrieHash, @@ -377,44 +447,49 @@ where /// Determine a child trie root given a hash DB and delta values. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_delta_trie_root( +pub fn child_delta_trie_root( keyspace: &[u8], - db: &mut DB, + db: &dyn trie_db::node_db::NodeDB, root_data: RD, + root_location: L::Location, delta: I, - recorder: Option<&mut dyn TrieRecorder>>, - cache: Option<&mut dyn TrieCache>, -) -> Result<::Out, Box>> + recorder: Option<&mut dyn TrieRecorder, L::Location>>, + cache: Option<&mut dyn TrieCache>, +) -> Result, L::Location>, Box>> where I: IntoIterator, A: Borrow<[u8]>, B: Borrow>, V: Borrow<[u8]>, RD: AsRef<[u8]>, - DB: hash_db::HashDB, + L::Location: Send + Sync, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_data.as_ref()); - let mut db = KeySpacedDBMut::new(db, keyspace); - delta_trie_root::(&mut db, root, delta, recorder, cache) + let db = KeySpacedDB::new(db, keyspace); + delta_trie_root::( + &db, + (root, root_location), + delta.into_iter().map(|(k, v)| (k, v, None)), + recorder, + cache, + Some(keyspace), + ) } /// Read a value from the child trie. -pub fn read_child_trie_value( +pub fn read_child_trie_value( keyspace: &[u8], - db: &DB, - root: &TrieHash, + db: &dyn trie_db::node_db::NodeDB, + root: &Root, key: &[u8], - recorder: Option<&mut dyn TrieRecorder>>, - cache: Option<&mut dyn TrieCache>, -) -> Result>, Box>> -where - DB: hash_db::HashDBRef, -{ + recorder: Option<&mut dyn TrieRecorder, L::Location>>, + cache: Option<&mut dyn TrieCache>, +) -> Result>, Box>> { let db = KeySpacedDB::new(db, keyspace); - TrieDBBuilder::::new(&db, &root) + TrieDBBuilder::::new_with_db_location(&db, &root.0, root.1) .with_optional_recorder(recorder) .with_optional_cache(cache) .build() @@ -423,19 +498,16 @@ where } /// Read a hash from the child trie. -pub fn read_child_trie_hash( +pub fn read_child_trie_hash( keyspace: &[u8], - db: &DB, - root: &TrieHash, + db: &dyn trie_db::node_db::NodeDB, + root: &Root, key: &[u8], - recorder: Option<&mut dyn TrieRecorder>>, - cache: Option<&mut dyn TrieCache>, -) -> Result>, Box>> -where - DB: hash_db::HashDBRef, -{ + recorder: Option<&mut dyn TrieRecorder, L::Location>>, + cache: Option<&mut dyn TrieCache>, +) -> Result>, Box>> { let db = KeySpacedDB::new(db, keyspace); - TrieDBBuilder::::new(&db, &root) + TrieDBBuilder::::new_with_db_location(&db, &root.0, root.1) .with_optional_recorder(recorder) .with_optional_cache(cache) .build() @@ -444,19 +516,19 @@ where /// Read the [`trie_db::MerkleValue`] of the node that is the closest descendant for /// the provided child key. 
-pub fn read_child_trie_first_descedant_value<L: TrieConfiguration, DB>(
+pub fn read_child_trie_first_descendant_value<L: TrieConfiguration, DB>(
 	keyspace: &[u8],
 	db: &DB,
-	root: &TrieHash<L>,
+	root: &Root<L>,
 	key: &[u8],
-	recorder: Option<&mut dyn TrieRecorder<TrieHash<L>>>,
-	cache: Option<&mut dyn TrieCache<L::Codec>>,
+	recorder: Option<&mut dyn TrieRecorder<TrieHash<L>, L::Location>>,
+	cache: Option<&mut dyn TrieCache<L::Codec, L::Location>>,
 ) -> Result<Option<MerkleValue<TrieHash<L>>>, Box<TrieError<L>>>
 where
-	DB: hash_db::HashDBRef<L::Hash, trie_db::DBValue>,
+	DB: trie_db::node_db::NodeDB<L::Hash, trie_db::DBValue, L::Location>,
 {
 	let db = KeySpacedDB::new(db, keyspace);
-	TrieDBBuilder::<L>::new(&db, &root)
+	TrieDBBuilder::<L>::new_with_db_location(&db, &root.0, root.1)
 		.with_optional_recorder(recorder)
 		.with_optional_cache(cache)
 		.build()
@@ -466,36 +538,31 @@ where
 /// Read a value from the child trie with given query.
 pub fn read_child_trie_value_with<L, Q, DB>(
 	keyspace: &[u8],
-	db: &DB,
+	db: &dyn trie_db::node_db::NodeDB<L::Hash, trie_db::DBValue, L::Location>,
 	root_slice: &[u8],
+	root_location: L::Location,
 	key: &[u8],
 	query: Q,
 ) -> Result<Option<Vec<u8>>, Box<TrieError<L>>>
 where
 	L: TrieConfiguration,
 	Q: Query<L::Hash, Item = DBValue>,
-	DB: hash_db::HashDBRef<L::Hash, trie_db::DBValue>,
+	DB: trie_db::node_db::NodeDB<L::Hash, trie_db::DBValue, L::Location>,
 {
 	let mut root = TrieHash::<L>::default();
 	// root is fetched from DB, not writable by runtime, so it's always valid.
 	root.as_mut().copy_from_slice(root_slice);
 
 	let db = KeySpacedDB::new(db, keyspace);
-	TrieDBBuilder::<L>::new(&db, &root)
+	TrieDBBuilder::<L>::new_with_db_location(&db, &root, root_location)
 		.build()
 		.get_with(key, query)
 		.map(|x| x.map(|val| val.to_vec()))
 }
 
-/// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the
-/// prefix of every key value.
-pub struct KeySpacedDB<'a, DB: ?Sized, H>(&'a DB, &'a [u8], PhantomData<H>);
-
-/// `HashDBMut` implementation that append a encoded prefix (unique id bytes) in addition to the
+/// `NodeDB` implementation that appends an encoded prefix (unique id bytes) in addition to the
 /// prefix of every key value.
-///
-/// Mutable variant of `KeySpacedDB`, see [`KeySpacedDB`].
-pub struct KeySpacedDBMut<'a, DB: ?Sized, H>(&'a mut DB, &'a [u8], PhantomData<H>);
+pub struct KeySpacedDB<'a, H, T, DL>(&'a dyn trie_db::node_db::NodeDB<H, T, DL>, &'a [u8]);
 
 /// Utility function used to merge some byte data (keyspace) and `prefix` data
 /// before calling key value database primitives.
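
The derivation performed by that utility (`keyspace_as_prefix_alloc`, body in the next hunk) simply prepends the child trie's keyspace to the prefix bytes and leaves the trailing partial nibble untouched, so identical node hashes from different child tries cannot collide in the backing database. As a standalone sketch, assuming `Prefix` keeps the `(&[u8], Option<u8>)` shape re-exported from `trie_db::node_db`:

```rust
use trie_db::node_db::Prefix;

/// Merge a child trie keyspace with a node prefix before it reaches the
/// backing key-value database; the partial-nibble component is preserved.
fn keyspace_prefix(ks: &[u8], prefix: Prefix) -> (Vec<u8>, Option<u8>) {
    let mut result = Vec::with_capacity(ks.len() + prefix.0.len());
    result.extend_from_slice(ks);
    result.extend_from_slice(prefix.0);
    (result, prefix.1)
}
```
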
@@ -506,82 +573,27 @@ fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) (result, prefix.1) } -impl<'a, DB: ?Sized, H> KeySpacedDB<'a, DB, H> { +impl<'a, H, T, DL> KeySpacedDB<'a, H, T, DL> { /// instantiate new keyspaced db #[inline] - pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { - KeySpacedDB(db, ks, PhantomData) + pub fn new(db: &'a dyn trie_db::node_db::NodeDB, ks: &'a [u8]) -> Self { + KeySpacedDB(db, ks) } } -impl<'a, DB: ?Sized, H> KeySpacedDBMut<'a, DB, H> { - /// instantiate new keyspaced db - pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { - KeySpacedDBMut(db, ks, PhantomData) - } -} - -impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> +impl<'a, H, T, L> trie_db::node_db::NodeDB for KeySpacedDB<'a, H, T, L> where - DB: hash_db::HashDBRef + ?Sized, H: Hasher, T: From<&'static [u8]>, { - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } -} - -impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> -where - DB: hash_db::HashDB, - H: Hasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { + fn get(&self, key: &H::Out, prefix: Prefix, location: L) -> Option<(T, Vec)> { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) + self.0.get(key, (&derived_prefix.0, derived_prefix.1), location) } - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { + fn contains(&self, key: &H::Out, prefix: Prefix, location: L) -> bool { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.insert((&derived_prefix.0, derived_prefix.1), value) - } - - fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) - } - - fn remove(&mut self, key: &H::Out, prefix: Prefix) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.remove(key, (&derived_prefix.0, derived_prefix.1)) - } -} - -impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> -where - DB: hash_db::HashDB, - H: Hasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, -{ - fn as_hash_db(&self) -> &dyn hash_db::HashDB { - self - } - - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { - &mut *self + self.0.contains(key, (&derived_prefix.0, derived_prefix.1), location) } } @@ -601,73 +613,87 @@ mod trie_constants { mod tests { use super::*; use codec::{Compact, Decode, Encode}; - use hash_db::{HashDB, Hasher}; use sp_core::Blake2Hasher; - use trie_db::{DBValue, NodeCodec as NodeCodecT, Trie, TrieMut}; - use trie_standardmap::{Alphabet, StandardMap, ValueMode}; + use trie_db::{ + node_db::{Hasher, NodeDB, NodeDBMut}, + test_utils::{Alphabet, StandardMap, ValueMode}, + DBValue, NodeCodec as NodeCodecT, Trie, + }; + + type LayoutV0 = super::LayoutV0; + type LayoutV1 = super::LayoutV1; + type LayoutV0T = 
super::LayoutV0>; + type LayoutV1T = super::LayoutV1>; - type LayoutV0 = super::LayoutV0; - type LayoutV1 = super::LayoutV1; + type MemoryDBMeta = trie_db::memory_db::MemoryDB< + Blake2Hasher, + trie_db::memory_db::HashKey, + trie_db::DBValue, + >; - type MemoryDBMeta = memory_db::MemoryDB, trie_db::DBValue>; + type MemTreeDBMeta = trie_db::mem_tree_db::MemTreeDB; fn hashed_null_node() -> TrieHash { ::hashed_null_node() } - fn check_equivalent(input: &Vec<(&[u8], &[u8])>) { - { - let closed_form = T::trie_root(input.clone()); - let d = T::trie_root_unhashed(input.clone()); - println!("Data: {:#x?}, {:#x?}", d, Blake2Hasher::hash(&d[..])); - let persistent = { - let mut memdb = MemoryDBMeta::default(); - let mut root = Default::default(); - let mut t = TrieDBMutBuilder::::new(&mut memdb, &mut root).build(); - for (x, y) in input.iter().rev() { - t.insert(x, y).unwrap(); - } - *t.root() - }; - assert_eq!(closed_form, persistent); - } - } - - fn check_iteration(input: &Vec<(&[u8], &[u8])>) { - let mut memdb = MemoryDBMeta::default(); - let mut root = Default::default(); - { - let mut t = TrieDBMutBuilder::::new(&mut memdb, &mut root).build(); - for (x, y) in input.clone() { + fn check_equivalent(input: &Vec<(&[u8], &[u8])>) + where + T: TrieConfiguration, + D: NodeDBMut + Default, + { + let closed_form = T::trie_root(input.clone()); + let d = T::trie_root_unhashed(input.clone()); + println!("Data: {:#x?}, {:#x?}", d, Blake2Hasher::hash(&d[..])); + let persistent = { + let mut memdb = D::default(); + let mut t = TrieDBMutBuilder::::new(&mut memdb).build(); + for (x, y) in input.iter().rev() { t.insert(x, y).unwrap(); } + t.commit().root_hash() + }; + assert_eq!(closed_form, persistent); + } + + fn check_iteration(input: &Vec<(&[u8], &[u8])>) + where + T: TrieConfiguration, + D: NodeDBMut + Default, + { + let mut memdb = D::default(); + let mut t = TrieDBMutBuilder::::new(&mut memdb).build(); + for (x, y) in input.clone() { + t.insert(x, y).unwrap(); } - { - let t = TrieDBBuilder::::new(&memdb, &root).build(); - assert_eq!( - input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), - t.iter() - .unwrap() - .map(|x| x.map(|y| (y.0, y.1.to_vec())).unwrap()) - .collect::>() - ); - } + let changes = t.commit(); + let root = memdb.apply_changeset(changes); + let t = TrieDBBuilder::::new(&memdb, &root).build(); + assert_eq!( + input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), + t.iter() + .unwrap() + .map(|x| x.map(|y| (y.0, y.1.to_vec())).unwrap()) + .collect::>() + ); } fn check_input(input: &Vec<(&[u8], &[u8])>) { - check_equivalent::(input); - check_iteration::(input); - check_equivalent::(input); - check_iteration::(input); + check_equivalent::(input); + check_iteration::(input); + check_equivalent::(input); + check_iteration::(input); + check_equivalent::(input); + check_iteration::(input); + check_equivalent::(input); + check_iteration::(input); } #[test] fn default_trie_root() { let mut db = MemoryDB::default(); - let mut root = TrieHash::::default(); - let mut empty = TrieDBMutBuilder::::new(&mut db, &mut root).build(); - empty.commit(); - let root1 = empty.root().as_ref().to_vec(); + let empty = TrieDBMutBuilder::::new(&mut db).build(); + let root1 = empty.commit().root_hash().as_ref().to_vec(); let root2: Vec = LayoutV1::trie_root::<_, Vec, Vec>(std::iter::empty()) .as_ref() .iter() @@ -769,11 +795,10 @@ mod tests { } fn populate_trie<'db, T: TrieConfiguration>( - db: &'db mut dyn HashDB, - root: &'db mut TrieHash, + db: &'db dyn NodeDB, v: &[(Vec, Vec)], ) -> 
TrieDBMut<'db, T> { - let mut t = TrieDBMutBuilder::::new(db, root).build(); + let mut t = TrieDBMutBuilder::::new(db).build(); for i in 0..v.len() { let key: &[u8] = &v[i].0; let val: &[u8] = &v[i].1; @@ -791,10 +816,15 @@ mod tests { #[test] fn random_should_work() { - random_should_work_inner::(); - random_should_work_inner::(); + random_should_work_inner::(); + random_should_work_inner::(); + random_should_work_inner::(); } - fn random_should_work_inner() { + fn random_should_work_inner() + where + L: TrieConfiguration, + D: NodeDBMut + Default, + { let mut seed = ::Out::zero(); for test_i in 0..10_000 { if test_i % 50 == 0 { @@ -810,33 +840,35 @@ mod tests { .make_with(seed.as_fixed_bytes_mut()); let real = L::trie_root(x.clone()); - let mut memdb = MemoryDB::default(); - let mut root = Default::default(); + let mut memdb = D::default(); - let mut memtrie = populate_trie::(&mut memdb, &mut root, &x); + let memtrie = populate_trie::(&mut memdb, &x); + let commit = memtrie.commit(); + let root = memdb.apply_changeset(commit); - memtrie.commit(); - if *memtrie.root() != real { + if root != real { println!("TRIE MISMATCH"); println!(); - println!("{:?} vs {:?}", memtrie.root(), real); + println!("{:?} vs {:?}", root, real); for i in &x { println!("{:#x?} -> {:#x?}", i.0, i.1); } } - assert_eq!(*memtrie.root(), real); + assert_eq!(root, real); + let mut memtrie = TrieDBMutBuilder::::from_existing(&memdb, root).build(); unpopulate_trie::(&mut memtrie, &x); - memtrie.commit(); + let commit = memtrie.commit(); + let root = memdb.apply_changeset(commit); let hashed_null_node = hashed_null_node::(); - if *memtrie.root() != hashed_null_node { + if root != hashed_null_node { println!("- TRIE MISMATCH"); println!(); - println!("{:?} vs {:?}", memtrie.root(), hashed_null_node); + println!("{:?} vs {:?}", root, hashed_null_node); for i in &x { println!("{:#x?} -> {:#x?}", i.0, i.1); } } - assert_eq!(*memtrie.root(), hashed_null_node); + assert_eq!(root, hashed_null_node); } } @@ -895,10 +927,15 @@ mod tests { #[test] fn iterator_works() { - iterator_works_inner::(); - iterator_works_inner::(); + iterator_works_inner::(); + iterator_works_inner::(); + iterator_works_inner::(); } - fn iterator_works_inner() { + fn iterator_works_inner() + where + L: TrieConfiguration, + D: NodeDBMut + Default, + { let pairs = vec![ ( array_bytes::hex2bytes_unchecked("0103000000000000000464"), @@ -910,11 +947,12 @@ mod tests { ), ]; - let mut mdb = MemoryDB::default(); - let mut root = Default::default(); - let _ = populate_trie::(&mut mdb, &mut root, &pairs); + let mut mdb = D::default(); + let t = populate_trie::(&mut mdb, &pairs); + let changes = t.commit(); + let root = mdb.apply_changeset(changes); - let trie = TrieDBBuilder::::new(&mdb, &root).build(); + let trie = TrieDBBuilder::::new(&mdb, &root).build(); let iter = trie.iter().unwrap(); let mut iter_pairs = Vec::new(); @@ -934,8 +972,8 @@ mod tests { ]; let mut memdb = MemoryDB::default(); - let mut root = Default::default(); - populate_trie::(&mut memdb, &mut root, &pairs); + let t = populate_trie::(&mut memdb, &pairs); + let root = t.commit().apply_to(&mut memdb); let non_included_key: Vec = array_bytes::hex2bytes_unchecked("0909"); let proof = @@ -967,8 +1005,8 @@ mod tests { ]; let mut memdb = MemoryDB::default(); - let mut root = Default::default(); - populate_trie::(&mut memdb, &mut root, &pairs); + let t = populate_trie::(&mut memdb, &pairs); + let root = t.commit().apply_to(&mut memdb); let proof = generate_trie_proof::(&memdb, root, 
&[pairs[0].0.clone()]).unwrap(); @@ -1023,22 +1061,30 @@ mod tests { .unwrap(); let proof_db = proof.into_memory_db::(); - let first_storage_root = delta_trie_root::( + let first_storage_root = delta_trie_root::( &mut proof_db.clone(), - storage_root, - valid_delta, + (storage_root, Default::default()), + valid_delta + .iter() + .map(|(k, v)| (k.as_slice(), v.as_ref().map(Vec::as_slice), None)), + None, None, None, ) - .unwrap(); - let second_storage_root = delta_trie_root::( + .unwrap() + .root_hash(); + let second_storage_root = delta_trie_root::( &mut proof_db.clone(), - storage_root, - invalid_delta, + (storage_root, Default::default()), + invalid_delta + .iter() + .map(|(k, v)| (k.as_slice(), v.as_ref().map(Vec::as_slice), None)), + None, None, None, ) - .unwrap(); + .unwrap() + .root_hash(); assert_eq!(first_storage_root, second_storage_root); } @@ -1046,11 +1092,10 @@ mod tests { #[test] fn big_key() { let check = |keysize: usize| { - let mut memdb = PrefixedMemoryDB::::default(); - let mut root = Default::default(); - let mut t = TrieDBMutBuilder::::new(&mut memdb, &mut root).build(); + let mut memdb = MemoryDB::::default(); + let mut t = TrieDBMutBuilder::::new(&mut memdb).build(); t.insert(&vec![0x01u8; keysize][..], &[0x01u8, 0x23]).unwrap(); - std::mem::drop(t); + let root = t.commit().apply_to(&mut memdb); let t = TrieDBBuilder::::new(&memdb, &root).build(); assert_eq!(t.get(&vec![0x01u8; keysize][..]).unwrap(), Some(vec![0x01u8, 0x23])); }; @@ -1064,8 +1109,8 @@ mod tests { b"some_partial".iter().copied(), 24, vec![None; 16].into_iter(), - Some(trie_db::node::Value::Inline(b"value"[..].into())), + Some(trie_db::node::Value::<()>::Inline(b"value"[..].into())), ); - assert!(NodeCodec::::decode(branch.as_slice()).is_err()); + assert!(NodeCodec::::decode(branch.as_slice(), &[()]).is_err()); } } diff --git a/substrate/primitives/trie/src/node_codec.rs b/substrate/primitives/trie/src/node_codec.rs index 46acde77c054..1bc9bfef426c 100644 --- a/substrate/primitives/trie/src/node_codec.rs +++ b/substrate/primitives/trie/src/node_codec.rs @@ -19,12 +19,13 @@ use super::node_header::{NodeHeader, NodeKind}; use crate::{error::Error, trie_constants}; +use alloc::{borrow::Borrow, vec::Vec}; use codec::{Compact, Decode, Encode, Input}; -use hash_db::Hasher; -use sp_std::{borrow::Borrow, marker::PhantomData, ops::Range, vec::Vec}; +use core::{marker::PhantomData, ops::Range}; use trie_db::{ nibble_ops, node::{NibbleSlicePlan, NodeHandlePlan, NodePlan, Value, ValuePlan}, + node_db::Hasher, ChildReference, NodeCodec as NodeCodecT, }; @@ -134,12 +135,14 @@ where None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ]; + let mut i_hash = 0; for i in 0..nibble_ops::NIBBLE_LENGTH { if bitmap.value_at(i) { let count = >::decode(&mut input)?.0 as usize; let range = input.take(count)?; children[i] = Some(if count == H::LENGTH { - NodeHandlePlan::Hash(range) + i_hash += 1; + NodeHandlePlan::Hash(range, i_hash - 1) } else { NodeHandlePlan::Inline(range) }); @@ -185,7 +188,11 @@ where &[trie_constants::EMPTY_TRIE] } - fn leaf_node(partial: impl Iterator, number_nibble: usize, value: Value) -> Vec { + fn leaf_node( + partial: impl Iterator, + number_nibble: usize, + value: Value, + ) -> Vec { let contains_hash = matches!(&value, Value::Node(..)); let mut output = if contains_hash { partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueLeaf) @@ -197,7 +204,7 @@ where Compact(value.len() as u32).encode_to(&mut output); 
output.extend_from_slice(value); }, - Value::Node(hash) => { + Value::Node(hash, _) => { debug_assert!(hash.len() == H::LENGTH); output.extend_from_slice(hash); }, @@ -205,26 +212,26 @@ where output } - fn extension_node( + fn extension_node( _partial: impl Iterator, _nbnibble: usize, - _child: ChildReference<::Out>, + _child: ChildReference<::Out, L>, ) -> Vec { unreachable!("No extension codec.") } - fn branch_node( - _children: impl Iterator::Out>>>>, - _maybe_value: Option, + fn branch_node( + _children: impl Iterator::Out, L>>>>, + _maybe_value: Option>, ) -> Vec { unreachable!("No extension codec.") } - fn branch_node_nibbled( + fn branch_node_nibbled( partial: impl Iterator, number_nibble: usize, - children: impl Iterator::Out>>>>, - value: Option, + children: impl Iterator::Out, L>>>>, + value: Option>, ) -> Vec { let contains_hash = matches!(&value, Some(Value::Node(..))); let mut output = match (&value, contains_hash) { @@ -244,7 +251,7 @@ where Compact(value.len() as u32).encode_to(&mut output); output.extend_from_slice(value); }, - Some(Value::Node(hash)) => { + Some(Value::Node(hash, _)) => { debug_assert!(hash.len() == H::LENGTH); output.extend_from_slice(hash); }, @@ -252,7 +259,7 @@ where } Bitmap::encode( children.map(|maybe_child| match maybe_child.borrow() { - Some(ChildReference::Hash(h)) => { + Some(ChildReference::Hash(h, _)) => { h.as_ref().encode_to(&mut output); true }, diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 22a22b33b370..a31503c60697 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -22,7 +22,6 @@ use crate::{NodeCodec, StorageProof}; use codec::Encode; -use hash_db::Hasher; use parking_lot::{Mutex, MutexGuard}; use std::{ collections::{HashMap, HashSet}, @@ -34,7 +33,7 @@ use std::{ Arc, }, }; -use trie_db::{RecordedForKey, TrieAccess}; +use trie_db::{node_db::Hasher, Location, RecordedForKey, TrieAccess}; const LOG_TARGET: &str = "trie-recorder"; @@ -83,30 +82,36 @@ impl Default for RecorderInner { /// Owns the recorded data. Is used to transform data into a storage /// proof and to provide transaction support. The `as_trie_recorder` method provides a /// [`trie_db::TrieDB`] compatible recorder that implements the actual recording logic. -pub struct Recorder { +pub struct Recorder { inner: Arc>>, /// The estimated encoded size of the storage proof this recorder will produce. /// /// We store this in an atomic to be able to fetch the value while the `inner` is may locked. encoded_size_estimation: Arc, + _ph: PhantomData, } -impl Default for Recorder { +impl Default for Recorder { fn default() -> Self { - Self { inner: Default::default(), encoded_size_estimation: Arc::new(0.into()) } + Self { + inner: Default::default(), + encoded_size_estimation: Arc::new(0.into()), + _ph: PhantomData, + } } } -impl Clone for Recorder { +impl Clone for Recorder { fn clone(&self) -> Self { Self { inner: self.inner.clone(), encoded_size_estimation: self.encoded_size_estimation.clone(), + _ph: PhantomData, } } } -impl Recorder { +impl Recorder { /// Returns [`RecordedForKey`] per recorded key per trie. /// /// There are multiple tries when working with e.g. child tries. @@ -121,8 +126,8 @@ impl Recorder { /// /// NOTE: This locks a mutex that stays locked until the return value is dropped. 
#[inline] - pub fn as_trie_recorder(&self, storage_root: H::Out) -> TrieRecorder<'_, H> { - TrieRecorder:: { + pub fn as_trie_recorder(&self, storage_root: H::Out) -> TrieRecorder<'_, H, L> { + TrieRecorder:: { inner: self.inner.lock(), storage_root, encoded_size_estimation: self.encoded_size_estimation.clone(), @@ -138,7 +143,7 @@ impl Recorder { /// If you don't want to drain the recorded state, use [`Self::to_storage_proof`]. /// /// Returns the [`StorageProof`]. - pub fn drain_storage_proof(self) -> StorageProof { + pub fn drain_storage_proof(&self) -> StorageProof { let mut recorder = mem::take(&mut *self.inner.lock()); StorageProof::new(recorder.accessed_nodes.drain().map(|(_, v)| v)) } @@ -237,24 +242,24 @@ impl Recorder { } } -impl crate::ProofSizeProvider for Recorder { +impl crate::ProofSizeProvider for Recorder { fn estimate_encoded_size(&self) -> usize { Recorder::estimate_encoded_size(self) } } /// The [`TrieRecorder`](trie_db::TrieRecorder) implementation. -pub struct TrieRecorder<'a, H: Hasher> { +pub struct TrieRecorder<'a, H: Hasher, L: Location> { inner: MutexGuard<'a, RecorderInner>, storage_root: H::Out, encoded_size_estimation: Arc, - _phantom: PhantomData, + _phantom: PhantomData<(H, L)>, } -impl crate::TrieRecorderProvider for Recorder { - type Recorder<'a> = TrieRecorder<'a, H> where H: 'a; +impl crate::TrieRecorderProvider for Recorder { + type Recorder<'a> = TrieRecorder<'a, H, L> where H: 'a, L: 'a; - fn drain_storage_proof(self) -> Option { + fn drain_storage_proof(&self) -> Option { Some(Recorder::drain_storage_proof(self)) } @@ -263,7 +268,7 @@ impl crate::TrieRecorderProvider for Recorder { } } -impl<'a, H: Hasher> TrieRecorder<'a, H> { +impl<'a, H: Hasher, L: Location> TrieRecorder<'a, H, L> { /// Update the recorded keys entry for the given `full_key`. 
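
Because `drain_storage_proof` now takes `&self` and `mem::take`s the state behind the mutex, a recorder can be drained through a shared reference and keep serving afterwards. A hedged usage sketch, assuming the `sp_trie::recorder` module path and the `u64` location that `DBLocation` resolves to under `std` in this diff:

```rust
use sp_core::Blake2Hasher;
use sp_trie::{recorder::Recorder, StorageProof};

// Under `std`, `DBLocation` is `u64` in this diff.
type BlockRecorder = Recorder<Blake2Hasher, u64>;

fn drain_and_reuse(recorder: &BlockRecorder) -> StorageProof {
    // Empties the recorded nodes in place; the same recorder can continue
    // handing out `as_trie_recorder` instances for the next batch of reads.
    recorder.drain_storage_proof()
}
```
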
fn update_recorded_keys(&mut self, full_key: &[u8], access: RecordedForKey) { let inner = self.inner.deref_mut(); @@ -307,8 +312,8 @@ impl<'a, H: Hasher> TrieRecorder<'a, H> { } } -impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { - fn record(&mut self, access: TrieAccess) { +impl<'a, H: Hasher, L: Location> trie_db::TrieRecorder for TrieRecorder<'a, H, L> { + fn record(&mut self, access: TrieAccess) { let mut encoded_size_update = 0; match access { @@ -429,26 +434,22 @@ impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { #[cfg(test)] mod tests { use super::*; - use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder}; + use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieRecorder}; type MemoryDB = crate::MemoryDB; - type Layout = crate::LayoutV1; - type Recorder = super::Recorder; + type Layout = crate::LayoutV1; + type Recorder = super::Recorder; const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64]), (b"key4", &[4; 64])]; fn create_trie() -> (MemoryDB, TrieHash) { let mut db = MemoryDB::default(); - let mut root = Default::default(); - - { - let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); - for (k, v) in TEST_DATA { - trie.insert(k, v).expect("Inserts data"); - } + let mut trie = TrieDBMutBuilder::::new(&mut db).build(); + for (k, v) in TEST_DATA { + trie.insert(k, v).expect("Inserts data"); } - + let root = trie.commit().apply_to(&mut db); (db, root) } diff --git a/substrate/primitives/trie/src/storage_proof.rs b/substrate/primitives/trie/src/storage_proof.rs index 6c871d73b043..5ef1d5ab485c 100644 --- a/substrate/primitives/trie/src/storage_proof.rs +++ b/substrate/primitives/trie/src/storage_proof.rs @@ -15,14 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. +use alloc::{collections::btree_set::BTreeSet, vec::Vec}; use codec::{Decode, Encode}; -use hash_db::{HashDB, Hasher}; +use core::iter::{DoubleEndedIterator, IntoIterator}; use scale_info::TypeInfo; -use sp_std::{ - collections::btree_set::BTreeSet, - iter::{DoubleEndedIterator, IntoIterator}, - vec::Vec, -}; +use trie_db::node_db::Hasher; + // Note that `LayoutV1` usage here (proof compaction) is compatible // with `LayoutV0`. use crate::LayoutV1 as Layout; @@ -105,7 +103,7 @@ impl StorageProof { root: H::Out, ) -> Result>> { let db = self.into_memory_db(); - crate::encode_compact::, crate::MemoryDB>(&db, &root) + crate::encode_compact::, crate::MemoryDB>(&db, &root) } /// Encode as a compact proof with default trie layout. @@ -114,7 +112,7 @@ impl StorageProof { root: H::Out, ) -> Result>> { let db = self.to_memory_db(); - crate::encode_compact::, crate::MemoryDB>(&db, &root) + crate::encode_compact::, crate::MemoryDB>(&db, &root) } /// Returns the estimated encoded size of the compact proof. 
@@ -163,7 +161,7 @@ impl CompactProof { expected_root: Option<&H::Out>, ) -> Result<(StorageProof, H::Out), crate::CompactProofError>> { let mut db = crate::MemoryDB::::new(&[]); - let root = crate::decode_compact::, _, _>( + let root = crate::decode_compact::, _>( &mut db, self.iter_compact_encoded_nodes(), expected_root, @@ -191,7 +189,7 @@ impl CompactProof { ) -> Result<(crate::MemoryDB, H::Out), crate::CompactProofError>> { let mut db = crate::MemoryDB::::new(&[]); - let root = crate::decode_compact::, _, _>( + let root = crate::decode_compact::, _>( &mut db, self.iter_compact_encoded_nodes(), expected_root, diff --git a/substrate/primitives/trie/src/trie_codec.rs b/substrate/primitives/trie/src/trie_codec.rs index f29e009c4761..75bcb8a014b4 100644 --- a/substrate/primitives/trie/src/trie_codec.rs +++ b/substrate/primitives/trie/src/trie_codec.rs @@ -20,8 +20,8 @@ //! This uses compact proof from trie crate and extends //! it to substrate specific layout and child trie system. -use crate::{CompactProof, HashDBT, TrieConfiguration, TrieHash, EMPTY_PREFIX}; -use sp_std::{boxed::Box, vec::Vec}; +use crate::{CompactProof, MemoryDB, NodeDBT, TrieConfiguration, TrieHash, EMPTY_PREFIX}; +use alloc::{boxed::Box, vec::Vec}; use trie_db::{CError, Trie}; /// Error for trie node decoding. @@ -55,18 +55,17 @@ impl From>> for Error( - db: &mut DB, +pub fn decode_compact<'a, L, I>( + db: &mut MemoryDB, encoded: I, expected_root: Option<&TrieHash>, ) -> Result, Error, CError>> where L: TrieConfiguration, - DB: HashDBT + hash_db::HashDBRef, I: IntoIterator, { let mut nodes_iter = encoded.into_iter(); - let (top_root, _nb_used) = trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; + let (top_root, _nb_used) = trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; // Only check root if expected root is passed as argument. if let Some(expected_root) = expected_root { @@ -109,7 +108,7 @@ where } } - if !HashDBT::::contains(db, &top_root, EMPTY_PREFIX) { + if !NodeDBT::::contains(db, &top_root, EMPTY_PREFIX, Default::default()) { return Err(Error::IncompleteProof) } @@ -117,7 +116,7 @@ where let mut nodes_iter = nodes_iter.peekable(); for child_root in child_tries.into_iter() { if previous_extracted_child_trie.is_none() && nodes_iter.peek().is_some() { - let (top_root, _) = trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; + let (top_root, _) = trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; previous_extracted_child_trie = Some(top_root); } @@ -155,7 +154,7 @@ pub fn encode_compact( ) -> Result, CError>> where L: TrieConfiguration, - DB: HashDBT + hash_db::HashDBRef, + DB: NodeDBT, { let mut child_tries = Vec::new(); let mut compact_proof = { @@ -191,7 +190,12 @@ where }; for child_root in child_tries { - if !HashDBT::::contains(partial_db, &child_root, EMPTY_PREFIX) { + if !NodeDBT::::contains( + partial_db, + &child_root, + EMPTY_PREFIX, + Default::default(), + ) { // child proof are allowed to be missing (unused root can be included // due to trie structure modification). 
continue diff --git a/substrate/primitives/trie/src/trie_stream.rs b/substrate/primitives/trie/src/trie_stream.rs index f57b80f978ff..8ab6ccabc03e 100644 --- a/substrate/primitives/trie/src/trie_stream.rs +++ b/substrate/primitives/trie/src/trie_stream.rs @@ -21,10 +21,9 @@ use crate::{ node_header::{size_and_prefix_iterator, NodeKind}, trie_constants, }; +use alloc::vec::Vec; use codec::{Compact, Encode}; -use hash_db::Hasher; -use sp_std::vec::Vec; -use trie_root; +use trie_db::{node_db::Hasher, trie_root}; /// Codec-flavored TrieStream. #[derive(Default, Clone)] diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index b6e6346e8017..f41823689268 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -44,7 +44,7 @@ pallet-timestamp = { path = "../../frame/timestamp", default-features = false } sp-consensus-grandpa = { path = "../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } sp-trie = { path = "../../primitives/trie", default-features = false } sp-transaction-pool = { path = "../../primitives/transaction-pool", default-features = false } -trie-db = { version = "0.28.0", default-features = false } +trie-db = { package = "subtrie", version = "0.0.1", default-features = false } sc-service = { path = "../../client/service", default-features = false, features = ["test-helpers"], optional = true } sp-state-machine = { path = "../../primitives/state-machine", default-features = false } sp-externalities = { path = "../../primitives/externalities", default-features = false } diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 63e0aa6e1379..73d6466ca6a3 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -50,9 +50,9 @@ use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; use sp_core::{OpaqueMetadata, RuntimeDebug}; use sp_trie::{ trie_types::{TrieDBBuilder, TrieDBMutBuilderV1}, - PrefixedMemoryDB, StorageProof, + MemoryDB, StorageProof, }; -use trie_db::{Trie, TrieMut}; +use trie_db::Trie; use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_core::hash::H256; @@ -439,16 +439,14 @@ fn code_using_trie() -> u64 { ] .to_vec(); - let mut mdb = PrefixedMemoryDB::default(); - let mut root = sp_std::default::Default::default(); - { - let mut t = TrieDBMutBuilderV1::::new(&mut mdb, &mut root).build(); - for (key, value) in &pairs { - if t.insert(key, value).is_err() { - return 101 - } + let mut mdb = MemoryDB::default(); + let mut t = TrieDBMutBuilderV1::::new(&mut mdb).build(); + for (key, value) in &pairs { + if t.insert(key, value).is_err() { + return 101 } } + let root = t.commit().apply_to(&mut mdb); let trie = TrieDBBuilder::::new(&mdb, &root).build(); let res = if let Ok(iter) = trie.iter() { iter.flatten().count() as u64 } else { 102 }; @@ -812,7 +810,8 @@ fn test_read_child_storage() { fn test_witness(proof: StorageProof, root: crate::Hash) { use sp_externalities::Externalities; let db: sp_trie::MemoryDB = proof.into_memory_db(); - let backend = sp_state_machine::TrieBackendBuilder::<_, crate::Hashing>::new(db, root).build(); + let backend = + sp_state_machine::TrieBackendBuilder::::new(Box::new(db), root).build(); let mut overlay = sp_state_machine::OverlayedChanges::default(); let mut ext = sp_state_machine::Ext::new( &mut overlay, @@ -1079,14 +1078,11 @@ mod tests { } fn witness_backend() -> (sp_trie::MemoryDB, crate::Hash) { - let mut root = 
crate::Hash::default(); let mut mdb = sp_trie::MemoryDB::::default(); - { - let mut trie = - sp_trie::trie_types::TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); - trie.insert(b"value3", &[142]).expect("insert failed"); - trie.insert(b"value4", &[124]).expect("insert failed"); - }; + let mut trie = sp_trie::trie_types::TrieDBMutBuilderV1::new(&mut mdb).build(); + trie.insert(b"value3", &[142]).expect("insert failed"); + trie.insert(b"value4", &[124]).expect("insert failed"); + let root = trie.commit().apply_to(&mut mdb); (mdb, root) } @@ -1094,7 +1090,7 @@ mod tests { fn witness_backend_works() { let (db, root) = witness_backend(); let backend = - sp_state_machine::TrieBackendBuilder::<_, crate::Hashing>::new(db, root).build(); + sp_state_machine::TrieBackendBuilder::::new(Box::new(db), root).build(); let proof = sp_state_machine::prove_read(backend, vec![b"value3"]).unwrap(); let client = TestClientBuilder::new().build(); let runtime_api = client.runtime_api(); diff --git a/substrate/utils/binary-merkle-tree/Cargo.toml b/substrate/utils/binary-merkle-tree/Cargo.toml index 6ba515afee17..0ea6940ab91b 100644 --- a/substrate/utils/binary-merkle-tree/Cargo.toml +++ b/substrate/utils/binary-merkle-tree/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] array-bytes = { version = "6.1", optional = true } log = { optional = true, workspace = true } -hash-db = { version = "0.16.0", default-features = false } +trie-db = { package = "subtrie", version = "0.0.1", default-features = false } [dev-dependencies] array-bytes = "6.1" @@ -25,4 +25,4 @@ sp-runtime = { path = "../../primitives/runtime" } [features] debug = ["array-bytes", "log"] default = ["debug", "std"] -std = ["hash-db/std", "log/std", "sp-core/std", "sp-runtime/std"] +std = ["log/std", "sp-core/std", "sp-runtime/std", "trie-db/std"] diff --git a/substrate/utils/binary-merkle-tree/src/lib.rs b/substrate/utils/binary-merkle-tree/src/lib.rs index 0efab9186c25..2852a7989392 100644 --- a/substrate/utils/binary-merkle-tree/src/lib.rs +++ b/substrate/utils/binary-merkle-tree/src/lib.rs @@ -37,7 +37,7 @@ use alloc::vec; #[cfg(not(feature = "std"))] use alloc::vec::Vec; -use hash_db::Hasher; +use trie_db::node_db::Hasher; /// Construct a root hash of a Binary Merkle Tree created from given leaves. 
/// diff --git a/substrate/utils/frame/benchmarking-cli/src/storage/cmd.rs b/substrate/utils/frame/benchmarking-cli/src/storage/cmd.rs index 307c9207fdaf..a48b0398c395 100644 --- a/substrate/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/substrate/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -21,8 +21,7 @@ use sc_client_db::DbHash; use sc_service::Configuration; use sp_blockchain::HeaderBackend; use sp_database::{ColumnId, Database}; -use sp_runtime::traits::{Block as BlockT, HashingFor}; -use sp_state_machine::Storage; +use sp_runtime::traits::Block as BlockT; use sp_storage::{ChildInfo, ChildType, PrefixedStorageKey, StateVersion}; use clap::{Args, Parser}; @@ -126,7 +125,7 @@ impl StorageCmd { cfg: Configuration, client: Arc, db: (Arc>, ColumnId), - storage: Arc>>, + storage: sc_client_db::StorageDb, ) -> Result<()> where BA: ClientBackend, diff --git a/substrate/utils/frame/benchmarking-cli/src/storage/write.rs b/substrate/utils/frame/benchmarking-cli/src/storage/write.rs index 4fa56b6e818f..8f41e67734ea 100644 --- a/substrate/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/substrate/utils/frame/benchmarking-cli/src/storage/write.rs @@ -22,7 +22,7 @@ use sp_blockchain::HeaderBackend; use sp_database::{ColumnId, Transaction}; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT}; use sp_state_machine::Backend as StateBackend; -use sp_trie::PrefixedMemoryDB; +use sp_trie::MemoryDB; use log::{info, trace}; use rand::prelude::*; @@ -43,7 +43,7 @@ impl StorageCmd { &self, client: Arc, (db, state_col): (Arc>, ColumnId), - storage: Arc>>, + storage: sc_client_db::StorageDb, ) -> Result where Block: BlockT
+ Debug, @@ -57,7 +57,9 @@ impl StorageCmd { let best_hash = client.usage_info().chain.best_hash; let header = client.header(best_hash)?.ok_or("Header not found")?; let original_root = *header.state_root(); - let trie = DbStateBuilder::>::new(storage.clone(), original_root).build(); + let trie = + DbStateBuilder::>::new(Box::new(storage.clone()), original_root) + .build(); info!("Preparing keys from block {}", best_hash); // Load all KV pairs and randomly shuffle them. @@ -159,32 +161,6 @@ impl StorageCmd { } } -/// Converts a Trie transaction into a DB transaction. -/// Removals are ignored and will not be included in the final tx. -/// `invert_inserts` replaces all inserts with removals. -fn convert_tx( - db: Arc>, - mut tx: PrefixedMemoryDB>, - invert_inserts: bool, - col: ColumnId, -) -> Transaction { - let mut ret = Transaction::::default(); - - for (mut k, (v, rc)) in tx.drain().into_iter() { - if rc > 0 { - db.sanitize_key(&mut k); - if invert_inserts { - ret.remove(col, &k); - } else { - ret.set(col, &k, &v); - } - } - // < 0 means removal - ignored. - // 0 means no modification. - } - ret -} - /// Measures write benchmark /// if `child_info` exist then it means this is a child tree key fn measure_write( @@ -193,7 +169,7 @@ fn measure_write( key: Vec, new_v: Vec, version: StateVersion, - col: ColumnId, + _col: ColumnId, child_info: Option<&ChildInfo>, ) -> Result<(usize, Duration)> { let start = Instant::now(); @@ -201,23 +177,28 @@ fn measure_write( // calculate the root hash of the Trie after the modification. let replace = vec![(key.as_ref(), Some(new_v.as_ref()))]; let stx = match child_info { - Some(info) => trie.child_storage_root(info, replace.iter().cloned(), version).2, - None => trie.storage_root(replace.iter().cloned(), version).1, + Some(info) => trie.child_storage_root(info, replace.iter().cloned(), version).0, + None => trie.storage_root(replace.iter().cloned().map(|(k, v)| (k, v, None)), version), }; - // Only the keep the insertions, since we do not want to benchmark pruning. - let tx = convert_tx::(db.clone(), stx.clone(), false, col); + + let mut tx = Transaction::::default(); + sc_client_db::apply_tree_commit::>(stx, db.state_capabilities(), &mut tx); + db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; let result = (new_v.len(), start.elapsed()); + /* // Now undo the changes by removing what was added. 
- let tx = convert_tx::(db.clone(), stx.clone(), true, col); + let tx = convert_tx::(db.clone(), mdb.clone(), true, col); db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; + */ Ok(result) } /// Checks if a new value causes any collision in tree updates /// returns true if there is no collision /// if `child_info` exist then it means this is a child tree key +/// TODO variant with Prefixed or with location fn check_new_value( db: Arc>, trie: &DbState>, @@ -228,14 +209,16 @@ fn check_new_value( child_info: Option<&ChildInfo>, ) -> bool { let new_kv = vec![(key.as_ref(), Some(new_v.as_ref()))]; - let mut stx = match child_info { - Some(info) => trie.child_storage_root(info, new_kv.iter().cloned(), version).2, - None => trie.storage_root(new_kv.iter().cloned(), version).1, + let stx = match child_info { + Some(info) => trie.child_storage_root(info, new_kv.iter().cloned(), version).0, + None => trie.storage_root(new_kv.iter().cloned().map(|(k, v)| (k, v, None)), version), }; - for (mut k, (_, rc)) in stx.drain().into_iter() { + let mut mdb = MemoryDB::>::default(); + stx.apply_to(&mut mdb); + for (k, (_, rc)) in mdb.drain().into_iter() { if rc > 0 { - db.sanitize_key(&mut k); - if db.get(col, &k).is_some() { + //db.sanitize_key(&mut k); + if db.get(col, k.as_ref()).is_some() { trace!("Benchmark-store key creation: Key collision detected, retry"); return false } diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index c7399468da9d..685cd7dd5550 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -1193,7 +1193,7 @@ where info!( target: LOG_TARGET, "initialized state externalities with storage root {:?} and state_version {:?}", - ext.as_backend().root(), + ext.as_backend().map(|b| *b.root()), ext.state_version ); @@ -1372,8 +1372,21 @@ mod remote_tests { // there should be more keys in the child ext. 
assert!( - child_ext.as_backend().backend_storage().keys().len() > - ext.as_backend().backend_storage().keys().len() + child_ext + .as_backend() + .unwrap() + .backend_storage() + .as_prefixed_mem_db() + .unwrap() + .keys() + .len() > ext + .as_backend() + .unwrap() + .backend_storage() + .as_prefixed_mem_db() + .unwrap() + .keys() + .len() ); } diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index f9a45e21ce13..7d490a2d3aaa 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -22,7 +22,7 @@ serde = { features = ["derive"], workspace = true, default-features = true } sp-core = { path = "../../../../primitives/core" } sp-state-machine = { path = "../../../../primitives/state-machine" } sp-trie = { path = "../../../../primitives/trie" } -trie-db = "0.28.0" +trie-db = { package = "subtrie", version = "0.0.1" } jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index f45258ea593d..b7de3d680684 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -34,7 +34,7 @@ use sp_core::{ use sp_state_machine::backend::AsTrieBackend; use sp_trie::{ trie_types::{TrieDB, TrieDBBuilder}, - KeySpacedDB, Trie, + KeySpacedDB, }; use trie_db::{ node::{NodePlan, ValuePlan}, @@ -42,12 +42,14 @@ use trie_db::{ }; fn count_migrate<'a, H: Hasher>( - storage: &'a dyn trie_db::HashDBRef>, + storage: &'a dyn trie_db::node_db::NodeDB, sp_state_machine::DBLocation>, root: &'a H::Out, + root_location: sp_state_machine::DBLocation, ) -> std::result::Result<(u64, u64, TrieDB<'a, 'a, H>), String> { let mut nb = 0u64; let mut total_nb = 0u64; - let trie = TrieDBBuilder::new(storage, root).build(); + let trie = TrieDBBuilder::new_with_db_location(storage, root, root_location).build(); + let iter_node = TrieDBNodeIterator::new(&trie).map_err(|e| format!("TrieDB node iterator error: {}", e))?; for node in iter_node { @@ -78,27 +80,37 @@ where { let trie_backend = backend.as_trie_backend(); let essence = trie_backend.essence(); - let (top_remaining_to_migrate, total_top, trie) = count_migrate(essence, essence.root())?; + let (top_remaining_to_migrate, total_top, trie) = + count_migrate(essence, essence.root(), Default::default())?; let mut child_remaining_to_migrate = 0; let mut total_child = 0; - let mut child_roots: Vec<(ChildInfo, Vec)> = Vec::new(); + let mut child_roots: Vec<(ChildInfo, Vec, sp_state_machine::DBLocation)> = Vec::new(); // get all child trie roots - for key_value in trie.iter().map_err(|e| format!("TrieDB node iterator error: {}", e))? 
+	let mut iter_node =
+		TrieDBNodeIterator::new(&trie).map_err(|e| format!("TrieDB node iterator error: {}", e))?;
+	while let Some(item) = iter_node.next() {
+		let item = item.map_err(|e| format!("TrieDB node iterator error: {}", e))?;
+		let Some(key_value) = iter_node.item_from_raw(&item) else { continue };
 		let (key, value) = key_value.map_err(|e| format!("TrieDB node iterator error: {}", e))?;
 		if key[..].starts_with(sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)
 		{
+			let location = item.2.node_plan().additional_ref_location(item.2.locations());
 			let prefixed_key = PrefixedStorageKey::new(key);
 			let (_type, unprefixed) = ChildType::from_prefixed_key(&prefixed_key).unwrap();
-			child_roots.push((ChildInfo::new_default(unprefixed), value));
+			child_roots.push((
+				ChildInfo::new_default(unprefixed),
+				value,
+				location.unwrap_or_default(),
+			));
 		}
 	}
 
-	for (child_info, root) in child_roots {
+	for (child_info, root, location) in child_roots {
 		let mut child_root = H::Out::default();
 		let storage = KeySpacedDB::new(essence, child_info.keyspace());
 
 		child_root.as_mut()[..].copy_from_slice(&root[..]);
-		let (nb, total_top, _) = count_migrate(&storage, &child_root)?;
+		let (nb, total_top, _) = count_migrate(&storage, &child_root, location)?;
 
 		child_remaining_to_migrate += nb;
 		total_child += total_top;
 	}
 
diff --git a/substrate/utils/frame/try-runtime/cli/src/commands/fast_forward.rs b/substrate/utils/frame/try-runtime/cli/src/commands/fast_forward.rs
index f1dee16debe7..57c98a9b4f47 100644
--- a/substrate/utils/frame/try-runtime/cli/src/commands/fast_forward.rs
+++ b/substrate/utils/frame/try-runtime/cli/src/commands/fast_forward.rs
@@ -127,9 +127,7 @@ async fn run(
 	let storage_changes =
 		changes.drain_storage_changes(&externalities.backend, externalities.state_version)?;
 
-	externalities
-		.backend
-		.apply_transaction(storage_changes.transaction_storage_root, storage_changes.transaction);
+	externalities.backend.apply_transaction(storage_changes.transaction);
 
 	Ok(())
 }
@@ -225,7 +223,7 @@ where
 	for _ in 1..=command.n_blocks.unwrap_or(u64::MAX) {
 		// We are saving state before we overwrite it while producing new block.
-		let backend = ext.as_backend();
+		let backend = ext.as_backend().unwrap();
 
 		log::info!("Producing new empty block at height {:?}", last_block_number + One::one());
 
diff --git a/substrate/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/substrate/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
index 53db5e643463..38fba976a8a3 100644
--- a/substrate/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
+++ b/substrate/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
@@ -184,17 +184,14 @@ where
 			)
 			.unwrap();
 
-		state_ext.backend.apply_transaction(
-			storage_changes.transaction_storage_root,
-			storage_changes.transaction,
-		);
+		state_ext.backend.apply_transaction(storage_changes.transaction);
 
 		log::info!(
 			target: LOG_TARGET,
 			"executed block {}, consumed weight {}, new storage root {:?}",
 			number,
 			consumed_weight,
-			state_ext.as_backend().root(),
+			state_ext.as_backend().unwrap().root(),
 		);
 	}
 
diff --git a/substrate/utils/frame/try-runtime/cli/src/lib.rs b/substrate/utils/frame/try-runtime/cli/src/lib.rs
index 73952ce816af..e19ae1d30598 100644
--- a/substrate/utils/frame/try-runtime/cli/src/lib.rs
+++ b/substrate/utils/frame/try-runtime/cli/src/lib.rs
@@ -54,9 +54,7 @@ use sp_runtime::{
 	traits::{BlakeTwo256, Block as BlockT, Hash as HashT, HashingFor, NumberFor},
 	DeserializeOwned, Digest,
 };
-use sp_state_machine::{
-	CompactProof, OverlayedChanges, StateMachine, TestExternalities, TrieBackendBuilder,
-};
+use sp_state_machine::{CompactProof, OverlayedChanges, StateMachine, TestExternalities};
 use sp_version::StateVersion;
 use std::{fmt::Debug, path::PathBuf, str::FromStr};
 
@@ -571,15 +569,14 @@ pub(crate) fn state_machine_call_with_proof
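
Reading note: the hunks above all follow one pattern. trie-db's hash-only lookups
(`HashDBRef`, `TrieDBBuilder::new`) give way to subtrie's location-aware ones, in
which a root hash is paired with a `DBLocation` identifying the node's position in
the backing database. The sketch below condenses that read path in isolation. It is
illustrative only: the `NodeDB` and `new_with_db_location` signatures are read off
this patch, not a published subtrie API, and `count_nodes` is a hypothetical helper.

use sp_core::Hasher;
use sp_state_machine::DBLocation;
use sp_trie::trie_types::TrieDBBuilder;
use trie_db::TrieDBNodeIterator;

/// Hypothetical helper: count the trie nodes reachable from `root`.
fn count_nodes<'a, H: Hasher>(
	// Storage is addressed by (hash, location) instead of hash alone.
	storage: &'a dyn trie_db::node_db::NodeDB<H, Vec<u8>, DBLocation>,
	root: &'a H::Out,
	// The root's position in the backing database; the patch passes
	// `Default::default()` when none is known (the top-level trie).
	root_location: DBLocation,
) -> Result<u64, String> {
	// Seeding the builder with the root's location lets descendants be
	// fetched by position rather than by a hash-indexed lookup.
	let trie = TrieDBBuilder::new_with_db_location(storage, root, root_location).build();
	let iter = TrieDBNodeIterator::new(&trie)
		.map_err(|e| format!("TrieDB node iterator error: {}", e))?;
	let mut count = 0u64;
	for node in iter {
		node.map_err(|e| format!("TrieDB node iterator error: {}", e))?;
		count += 1;
	}
	Ok(count)
}

Child tries extend the same idea: the node iterator exposes each child root's
location (`additional_ref_location` in the state-trie-migration-rpc hunk), which is
handed back to `count_migrate` instead of being rediscovered through a hash lookup.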