diff --git a/.thread-sanitizer-ignore b/.thread-sanitizer-ignore index 6392c81d67..b2b659800a 100644 --- a/.thread-sanitizer-ignore +++ b/.thread-sanitizer-ignore @@ -1 +1,2 @@ race:soralog::Logger::push +deadlock:boost::di::v1_1_0::wrappers::shared diff --git a/CMakePresets.json b/CMakePresets.json index 70f93383f2..8b3992be55 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -3,7 +3,7 @@ "configurePresets": [ { "name": "base", - "toolchainFile": "${sourceDir}/cmake/toolchain/clang-16_cxx20.cmake", + "toolchainFile": "${sourceDir}/cmake/toolchain/clang-19_cxx20.cmake", "generator": "Ninja", "hidden": true }, diff --git a/cmake/Hunter/config.cmake b/cmake/Hunter/config.cmake index cf75bc0ca0..8ff440bb56 100644 --- a/cmake/Hunter/config.cmake +++ b/cmake/Hunter/config.cmake @@ -62,6 +62,7 @@ if ("${WASM_COMPILER}" STREQUAL "WasmEdge") CMAKE_ARGS WASMEDGE_BUILD_STATIC_LIB=ON WASMEDGE_BUILD_SHARED_LIB=OFF + CMAKE_CXX_FLAGS=-Wno-error=maybe-uninitialized KEEP_PACKAGE_SOURCES ) endif () @@ -105,7 +106,8 @@ hunter_config( hunter_config( libp2p - VERSION 0.1.28 + URL https://github.com/libp2p/cpp-libp2p/archive/c3e6cce18335c989c9bbf3485885630a6ba463e4.zip + SHA1 32698ef4c3d373a39f87e7acb60eb7dc39399653 ) hunter_config( @@ -115,7 +117,7 @@ hunter_config( hunter_config( erasure_coding_crust - VERSION 0.0.8 + VERSION 0.0.9 KEEP_PACKAGE_SOURCES ) diff --git a/cmake/Hunter/hunter-gate-url.cmake b/cmake/Hunter/hunter-gate-url.cmake index 83591dcef4..9b798f2bd1 100644 --- a/cmake/Hunter/hunter-gate-url.cmake +++ b/cmake/Hunter/hunter-gate-url.cmake @@ -1,5 +1,5 @@ HunterGate( - URL https://github.com/qdrvm/hunter/archive/refs/tags/v0.25.3-qdrvm25.zip - SHA1 59c66ff04ebd2cbdf86c3b996d38d4be6eaaa78b + URL https://github.com/qdrvm/hunter/archive/refs/tags/v0.25.3-qdrvm26.zip + SHA1 21e8e29f562962e97fc8bcd35a4ad5244794c7fc LOCAL ) \ No newline at end of file diff --git a/cmake/functions.cmake b/cmake/functions.cmake index 140d8666e5..8e00c91cd9 100644 --- 
a/cmake/functions.cmake +++ b/cmake/functions.cmake @@ -23,6 +23,9 @@ function(addtest test_name) LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR}/test_lib ) disable_clang_tidy(${test_name}) + if(KAGOME_CTEST_ENV) + set_tests_properties(${test_name} PROPERTIES ENVIRONMENT "${KAGOME_CTEST_ENV}") + endif() endfunction() function(addtest_part test_name) diff --git a/cmake/toolchain/flags/sanitize_thread.cmake b/cmake/toolchain/flags/sanitize_thread.cmake index def027cde6..8527369c55 100644 --- a/cmake/toolchain/flags/sanitize_thread.cmake +++ b/cmake/toolchain/flags/sanitize_thread.cmake @@ -11,7 +11,7 @@ include(${CMAKE_CURRENT_LIST_DIR}/../../add_cache_flag.cmake) set(TSAN_IGNORELIST "${CMAKE_CURRENT_LIST_DIR}/../../../.thread-sanitizer-ignore") -set(ENV{TSAN_OPTIONS} "suppressions=${TSAN_IGNORELIST}") +list(APPEND KAGOME_CTEST_ENV "TSAN_OPTIONS=suppressions=${TSAN_IGNORELIST}") set(FLAGS -fsanitize=thread diff --git a/core/api/service/chain/impl/chain_api_impl.cpp b/core/api/service/chain/impl/chain_api_impl.cpp index 4f20b83b9c..019acfb623 100644 --- a/core/api/service/chain/impl/chain_api_impl.cpp +++ b/core/api/service/chain/impl/chain_api_impl.cpp @@ -28,16 +28,12 @@ namespace kagome::api { using primitives::BlockNumber; ChainApiImpl::ChainApiImpl( - std::shared_ptr block_repo, std::shared_ptr block_tree, std::shared_ptr block_storage, LazySPtr api_service) - : header_repo_{std::move(block_repo)}, - block_tree_{std::move(block_tree)}, + : block_tree_{std::move(block_tree)}, api_service_{api_service}, block_storage_{std::move(block_storage)} { - BOOST_ASSERT_MSG(header_repo_ != nullptr, - "block repo parameter is nullptr"); BOOST_ASSERT_MSG(block_tree_ != nullptr, "block tree parameter is nullptr"); BOOST_ASSERT(block_storage_); } @@ -48,7 +44,7 @@ namespace kagome::api { } outcome::result ChainApiImpl::getBlockHash( BlockNumber value) const { - return header_repo_->getHashByNumber(value); + return block_tree_->getHashByNumber(value); } outcome::result 
ChainApiImpl::getBlockHash( diff --git a/core/api/service/chain/impl/chain_api_impl.hpp b/core/api/service/chain/impl/chain_api_impl.hpp index e6128e7c7c..804bc34acc 100644 --- a/core/api/service/chain/impl/chain_api_impl.hpp +++ b/core/api/service/chain/impl/chain_api_impl.hpp @@ -26,8 +26,7 @@ namespace kagome::api { ~ChainApiImpl() override = default; - ChainApiImpl(std::shared_ptr block_repo, - std::shared_ptr block_tree, + ChainApiImpl(std::shared_ptr block_tree, std::shared_ptr block_storage, LazySPtr api_service); @@ -44,12 +43,12 @@ namespace kagome::api { outcome::result getHeader( std::string_view hash) override { OUTCOME_TRY(h, primitives::BlockHash::fromHexWithPrefix(hash)); - return header_repo_->getBlockHeader(h); + return block_tree_->getBlockHeader(h); } outcome::result getHeader() override { auto last = block_tree_->getLastFinalized(); - return header_repo_->getBlockHeader(last.hash); + return block_tree_->getBlockHeader(last.hash); } outcome::result getBlock( @@ -67,7 +66,6 @@ namespace kagome::api { uint32_t subscription_id) override; private: - std::shared_ptr header_repo_; std::shared_ptr block_tree_; LazySPtr api_service_; std::shared_ptr block_storage_; diff --git a/core/api/service/child_state/impl/child_state_api_impl.cpp b/core/api/service/child_state/impl/child_state_api_impl.cpp index 1777fc68c6..579d16ca4e 100644 --- a/core/api/service/child_state/impl/child_state_api_impl.cpp +++ b/core/api/service/child_state/impl/child_state_api_impl.cpp @@ -18,17 +18,14 @@ namespace kagome::api { ChildStateApiImpl::ChildStateApiImpl( - std::shared_ptr block_repo, std::shared_ptr trie_storage, std::shared_ptr block_tree, std::shared_ptr runtime_core, std::shared_ptr metadata) - : header_repo_{std::move(block_repo)}, - storage_{std::move(trie_storage)}, + : storage_{std::move(trie_storage)}, block_tree_{std::move(block_tree)}, runtime_core_{std::move(runtime_core)}, metadata_{std::move(metadata)} { - BOOST_ASSERT(nullptr != header_repo_); 
BOOST_ASSERT(nullptr != storage_); BOOST_ASSERT(nullptr != block_tree_); BOOST_ASSERT(nullptr != runtime_core_); @@ -43,7 +40,7 @@ namespace kagome::api { const auto &block_hash = block_hash_opt.value_or(block_tree_->getLastFinalized().hash); - OUTCOME_TRY(header, header_repo_->getBlockHeader(block_hash)); + OUTCOME_TRY(header, block_tree_->getBlockHeader(block_hash)); OUTCOME_TRY(initial_trie_reader, storage_->getEphemeralBatchAt(header.state_root)); OUTCOME_TRY(child_root, initial_trie_reader->get(child_storage_key)); @@ -81,7 +78,7 @@ namespace kagome::api { const auto &block_hash = block_hash_opt.value_or(block_tree_->getLastFinalized().hash); - OUTCOME_TRY(header, header_repo_->getBlockHeader(block_hash)); + OUTCOME_TRY(header, block_tree_->getBlockHeader(block_hash)); OUTCOME_TRY(initial_trie_reader, storage_->getEphemeralBatchAt(header.state_root)); OUTCOME_TRY(child_root, initial_trie_reader->get(child_storage_key)); @@ -123,7 +120,7 @@ namespace kagome::api { const std::optional &block_hash_opt) const { auto at = block_hash_opt ? block_hash_opt.value() : block_tree_->getLastFinalized().hash; - OUTCOME_TRY(header, header_repo_->getBlockHeader(at)); + OUTCOME_TRY(header, block_tree_->getBlockHeader(at)); OUTCOME_TRY(trie_reader, storage_->getEphemeralBatchAt(header.state_root)); OUTCOME_TRY(child_root, trie_reader->get(child_storage_key)); OUTCOME_TRY(child_root_hash, common::Hash256::fromSpan(child_root)); @@ -154,7 +151,7 @@ namespace kagome::api { const std::optional &block_hash_opt) const { auto at = block_hash_opt ? 
block_hash_opt.value() : block_tree_->getLastFinalized().hash; - OUTCOME_TRY(header, header_repo_->getBlockHeader(at)); + OUTCOME_TRY(header, block_tree_->getBlockHeader(at)); OUTCOME_TRY(trie_reader, storage_->getEphemeralBatchAt(header.state_root)); OUTCOME_TRY(child_root, trie_reader->get(child_storage_key)); OUTCOME_TRY(child_root_hash, common::Hash256::fromSpan(child_root)); diff --git a/core/api/service/child_state/impl/child_state_api_impl.hpp b/core/api/service/child_state/impl/child_state_api_impl.hpp index 0d591312f7..85cfdbe447 100644 --- a/core/api/service/child_state/impl/child_state_api_impl.hpp +++ b/core/api/service/child_state/impl/child_state_api_impl.hpp @@ -8,7 +8,6 @@ #include "api/service/child_state/child_state_api.hpp" -#include "blockchain/block_header_repository.hpp" #include "blockchain/block_tree.hpp" #include "injector/lazy.hpp" #include "runtime/runtime_api/core.hpp" @@ -20,7 +19,6 @@ namespace kagome::api { class ChildStateApiImpl final : public ChildStateApi { public: ChildStateApiImpl( - std::shared_ptr block_repo, std::shared_ptr trie_storage, std::shared_ptr block_tree, std::shared_ptr runtime_core, @@ -59,7 +57,6 @@ namespace kagome::api { const override; private: - std::shared_ptr header_repo_; std::shared_ptr storage_; std::shared_ptr block_tree_; std::shared_ptr runtime_core_; diff --git a/core/api/service/impl/api_service_impl.cpp b/core/api/service/impl/api_service_impl.cpp index 690a2efbbc..38cf51eb04 100644 --- a/core/api/service/impl/api_service_impl.cpp +++ b/core/api/service/impl/api_service_impl.cpp @@ -285,11 +285,10 @@ namespace kagome::api { auto &session = session_context.storage_sub; const auto id = session->generateSubscriptionSetId(); const auto &best_block_hash = block_tree_->bestBlock().hash; - const auto &header = - block_tree_->getBlockHeader(best_block_hash); - BOOST_ASSERT(header.has_value()); - auto batch_res = trie_storage_->getEphemeralBatchAt( - header.value().state_root); + OUTCOME_TRY(header, + 
block_tree_->getBlockHeader(best_block_hash)); + auto batch_res = + trie_storage_->getEphemeralBatchAt(header.state_root); if (!batch_res.has_value()) { SL_ERROR(logger_, "Failed to get storage state for block {}, required " diff --git a/core/api/service/state/impl/state_api_impl.cpp b/core/api/service/state/impl/state_api_impl.cpp index 1999ae4d19..bb94f1d3f7 100644 --- a/core/api/service/state/impl/state_api_impl.cpp +++ b/core/api/service/state/impl/state_api_impl.cpp @@ -39,21 +39,18 @@ OUTCOME_CPP_DEFINE_CATEGORY(kagome::api, StateApiImpl::Error, e) { namespace kagome::api { StateApiImpl::StateApiImpl( - std::shared_ptr block_repo, std::shared_ptr trie_storage, std::shared_ptr block_tree, std::shared_ptr runtime_core, std::shared_ptr metadata, std::shared_ptr executor, LazySPtr api_service) - : header_repo_{std::move(block_repo)}, - storage_{std::move(trie_storage)}, + : storage_{std::move(trie_storage)}, block_tree_{std::move(block_tree)}, runtime_core_{std::move(runtime_core)}, api_service_{api_service}, metadata_{std::move(metadata)}, executor_{std::move(executor)} { - BOOST_ASSERT(nullptr != header_repo_); BOOST_ASSERT(nullptr != storage_); BOOST_ASSERT(nullptr != block_tree_); BOOST_ASSERT(nullptr != runtime_core_); @@ -81,7 +78,7 @@ namespace kagome::api { const auto &block_hash = block_hash_opt.value_or(block_tree_->getLastFinalized().hash); - OUTCOME_TRY(header, header_repo_->getBlockHeader(block_hash)); + OUTCOME_TRY(header, block_tree_->getBlockHeader(block_hash)); OUTCOME_TRY(initial_trie_reader, storage_->getEphemeralBatchAt(header.state_root)); auto cursor = initial_trie_reader->trieCursor(); @@ -121,7 +118,7 @@ namespace kagome::api { outcome::result> StateApiImpl::getStorageAt( common::BufferView key, const primitives::BlockHash &at) const { - OUTCOME_TRY(header, header_repo_->getBlockHeader(at)); + OUTCOME_TRY(header, block_tree_->getBlockHeader(at)); OUTCOME_TRY(trie_reader, storage_->getEphemeralBatchAt(header.state_root)); auto res = 
trie_reader->tryGet(key); return common::map_result_optional( @@ -134,7 +131,7 @@ namespace kagome::api { const std::optional &block_hash_opt) const { auto at = block_hash_opt ? block_hash_opt.value() : block_tree_->getLastFinalized().hash; - OUTCOME_TRY(header, header_repo_->getBlockHeader(at)); + OUTCOME_TRY(header, block_tree_->getBlockHeader(at)); OUTCOME_TRY(trie_reader, storage_->getEphemeralBatchAt(header.state_root)); OUTCOME_TRY(res, trie_reader->tryGet(key)); return res ? std::make_optional(res->size()) : std::nullopt; @@ -152,8 +149,8 @@ namespace kagome::api { } if (from != to) { - OUTCOME_TRY(from_number, header_repo_->getNumberByHash(from)); - OUTCOME_TRY(to_number, header_repo_->getNumberByHash(to)); + OUTCOME_TRY(from_number, block_tree_->getNumberByHash(from)); + OUTCOME_TRY(to_number, block_tree_->getNumberByHash(to)); if (to_number < from_number) { return Error::END_BLOCK_LOWER_THAN_BEGIN_BLOCK; } @@ -169,7 +166,7 @@ namespace kagome::api { // returning the whole vector with block ids OUTCOME_TRY(range, block_tree_->getChainByBlocks(from, to)); for (auto &block : range) { - OUTCOME_TRY(header, header_repo_->getBlockHeader(block)); + OUTCOME_TRY(header, block_tree_->getBlockHeader(block)); OUTCOME_TRY(batch, storage_->getEphemeralBatchAt(header.state_root)); StorageChangeSet change{.block = block}; for (auto &key : keys) { @@ -206,7 +203,7 @@ namespace kagome::api { auto at = opt_at.has_value() ? 
opt_at.value() : block_tree_->bestBlock().hash; storage::trie::OnRead db; - OUTCOME_TRY(header, header_repo_->getBlockHeader(at)); + OUTCOME_TRY(header, block_tree_->getBlockHeader(at)); OUTCOME_TRY( trie, storage_->getProofReaderBatchAt(header.state_root, db.onRead())); for (auto &key : keys) { diff --git a/core/api/service/state/impl/state_api_impl.hpp b/core/api/service/state/impl/state_api_impl.hpp index dc301e3741..dce2da9fc5 100644 --- a/core/api/service/state/impl/state_api_impl.hpp +++ b/core/api/service/state/impl/state_api_impl.hpp @@ -8,7 +8,6 @@ #include "api/service/state/state_api.hpp" -#include "blockchain/block_header_repository.hpp" #include "blockchain/block_tree.hpp" #include "injector/lazy.hpp" #include "runtime/runtime_api/core.hpp" @@ -32,8 +31,7 @@ namespace kagome::api { static constexpr size_t kMaxBlockRange = 256; static constexpr size_t kMaxKeySetSize = 64; - StateApiImpl(std::shared_ptr block_repo, - std::shared_ptr trie_storage, + StateApiImpl(std::shared_ptr trie_storage, std::shared_ptr block_tree, std::shared_ptr runtime_core, std::shared_ptr metadata, @@ -94,7 +92,6 @@ namespace kagome::api { std::string_view hex_block_hash) override; private: - std::shared_ptr header_repo_; std::shared_ptr storage_; std::shared_ptr block_tree_; std::shared_ptr runtime_core_; diff --git a/core/application/app_configuration.hpp b/core/application/app_configuration.hpp index 6a5807553c..69c4140716 100644 --- a/core/application/app_configuration.hpp +++ b/core/application/app_configuration.hpp @@ -133,6 +133,11 @@ namespace kagome::application { */ virtual uint32_t luckyPeers() const = 0; + /** + * @return maximum number of peer connections + */ + virtual uint32_t maxPeers() const = 0; + /** * @return multiaddresses of bootstrat nodes */ @@ -213,8 +218,8 @@ namespace kagome::application { * List of telemetry endpoints specified via CLI argument or config file * @return a vector of parsed telemetry endpoints */ - virtual const std::vector & - 
telemetryEndpoints() const = 0; + virtual const std::vector + &telemetryEndpoints() const = 0; /** * @return enum constant of the chosen sync method diff --git a/core/application/impl/app_configuration_impl.cpp b/core/application/impl/app_configuration_impl.cpp index 7e34615377..c4130d05c7 100644 --- a/core/application/impl/app_configuration_impl.cpp +++ b/core/application/impl/app_configuration_impl.cpp @@ -105,6 +105,7 @@ namespace { const uint32_t def_in_peers = 75; const uint32_t def_in_peers_light = 100; const auto def_lucky_peers = 4; + const auto def_max_peers = 1000; const uint32_t def_random_walk_interval = 15; const auto def_full_sync = "Full"; const auto def_wasm_execution = "Interpreted"; @@ -180,7 +181,7 @@ namespace { #if KAGOME_WASM_COMPILER_WASM_EDGE == 1 "WasmEdge", #endif - "Binaryen" + "Binaryen" }; static const std::string interpreters_str = @@ -282,6 +283,7 @@ namespace kagome::application { in_peers_(def_in_peers), in_peers_light_(def_in_peers_light), lucky_peers_(def_lucky_peers), + max_peers_(def_max_peers), dev_mode_(def_dev_mode), node_name_(randomNodeName()), node_version_(buildVersion()), @@ -495,6 +497,7 @@ namespace kagome::application { load_u32(val, "in-peers", in_peers_); load_u32(val, "in-peers-light", in_peers_light_); load_u32(val, "lucky-peers", lucky_peers_); + load_u32(val, "max-peers", max_peers_); load_telemetry_uris(val, "telemetry-endpoints", telemetry_endpoints_); load_u32(val, "random-walk-interval", random_walk_interval_); } @@ -827,7 +830,8 @@ namespace kagome::application { ("out-peers", po::value()->default_value(def_out_peers), "number of outgoing connections we're trying to maintain") ("in-peers", po::value()->default_value(def_in_peers), "maximum number of inbound full nodes peers") ("in-peers-light", po::value()->default_value(def_in_peers_light), "maximum number of inbound light nodes peers") - ("lucky-peers", po::value()->default_value(def_lucky_peers), "number of \"lucky\" peers (peers that are being gossiped 
to). -1 for broadcast." ) + ("lucky-peers", po::value()->default_value(def_lucky_peers), "number of \"lucky\" peers (peers that are being gossiped to). -1 for full broadcast." ) + ("max-peers", po::value()->default_value(def_max_peers), "maximum number of peer connections" ) ("max-blocks-in-response", po::value(), "max block per response while syncing") ("name", po::value(), "the human-readable name for this node") ("no-telemetry", po::bool_switch(), "Disables telemetry broadcasting") @@ -912,8 +916,8 @@ namespace kagome::application { } if (vm.count("help") > 0) { - std::cout - << "Available subcommands: storage-explorer db-editor benchmark key\n"; + std::cout << "Available subcommands: storage-explorer db-editor " + "benchmark key\n"; std::cout << desc << '\n'; return false; } @@ -1329,6 +1333,8 @@ namespace kagome::application { find_argument( vm, "lucky-peers", [&](int32_t val) { lucky_peers_ = val; }); + max_peers_ = vm.at("max-peers").as(); + find_argument(vm, "ws-max-connections", [&](uint32_t val) { max_ws_connections_ = val; }); diff --git a/core/application/impl/app_configuration_impl.hpp b/core/application/impl/app_configuration_impl.hpp index 2421b9f874..5022fb94eb 100644 --- a/core/application/impl/app_configuration_impl.hpp +++ b/core/application/impl/app_configuration_impl.hpp @@ -117,6 +117,9 @@ namespace kagome::application { uint32_t luckyPeers() const override { return lucky_peers_; } + uint32_t maxPeers() const override { + return max_peers_; + } const boost::asio::ip::tcp::endpoint &rpcEndpoint() const override { return rpc_endpoint_; } @@ -348,6 +351,7 @@ namespace kagome::application { uint32_t in_peers_; uint32_t in_peers_light_; uint32_t lucky_peers_; + uint32_t max_peers_; network::PeeringConfig peering_config_; bool dev_mode_; std::string node_name_; diff --git a/core/application/impl/kagome_application_impl.cpp b/core/application/impl/kagome_application_impl.cpp index bbfd896221..0fde7f639e 100644 --- 
a/core/application/impl/kagome_application_impl.cpp +++ b/core/application/impl/kagome_application_impl.cpp @@ -6,7 +6,6 @@ #include "application/impl/kagome_application_impl.hpp" -#include #include #include @@ -18,7 +17,6 @@ #include "injector/application_injector.hpp" #include "metrics/metrics.hpp" #include "parachain/pvf/secure_mode_precheck.hpp" -#include "storage/migrations/migrations.hpp" #include "telemetry/service.hpp" #include "utils/watchdog.hpp" @@ -72,11 +70,10 @@ namespace kagome::application { getpid()); auto chain_path = app_config_->chainPath(chain_spec_->id()); - const char *storage_backend = - app_config_->storageBackend() - == AppConfiguration::StorageBackend::RocksDB - ? "RocksDB" - : "Unknown"; + auto storage_backend = app_config_->storageBackend() + == AppConfiguration::StorageBackend::RocksDB + ? "RocksDB" + : "Unknown"; logger_->info("Chain path is {}, storage backend is {}", chain_path.native(), storage_backend); @@ -129,7 +126,7 @@ namespace kagome::application { if (not app_config_->disableSecureMode() and app_config_->usePvfSubprocess() and app_config_->roles().isAuthority()) { auto res = parachain::runSecureModeCheckProcess( - *injector_.injectIoContext(), app_config_->runtimeCacheDirPath()); + app_config_->runtimeCacheDirPath()); if (!res) { SL_ERROR(logger_, "Secure mode check failed: {}", res.error()); exit(EXIT_FAILURE); @@ -147,13 +144,6 @@ namespace kagome::application { "platform. 
Proceed at your own risk."); #endif - if (app_config_->enableDbMigration()) { - if (auto res = storage::migrations::runMigrations(injector_); !res) { - SL_ERROR(logger_, "Failed to migrate the database: {}", res.error()); - exit(EXIT_FAILURE); - } - } - app_state_manager->run(); watchdog->stop(); diff --git a/core/application/modes/recovery_mode.cpp b/core/application/modes/recovery_mode.cpp index 179d293ba2..6b86e291d6 100644 --- a/core/application/modes/recovery_mode.cpp +++ b/core/application/modes/recovery_mode.cpp @@ -19,21 +19,18 @@ namespace kagome::application::mode { const AppConfiguration &app_config, std::shared_ptr spaced_storage, std::shared_ptr storage, - std::shared_ptr header_repo, std::shared_ptr trie_storage, std::shared_ptr authority_manager, std::shared_ptr block_tree) : app_config_(app_config), spaced_storage_(std::move(spaced_storage)), storage_(std::move(storage)), - header_repo_(std::move(header_repo)), trie_storage_(std::move(trie_storage)), authority_manager_(std::move(authority_manager)), block_tree_(std::move(block_tree)), log_(log::createLogger("RecoveryMode", "main")) { BOOST_ASSERT(spaced_storage_ != nullptr); BOOST_ASSERT(storage_ != nullptr); - BOOST_ASSERT(header_repo_ != nullptr); BOOST_ASSERT(trie_storage_ != nullptr); BOOST_ASSERT(authority_manager_ != nullptr); BOOST_ASSERT(block_tree_ != nullptr); @@ -44,7 +41,6 @@ namespace kagome::application::mode { auto res = blockchain::BlockTreeImpl::recover(app_config_.recoverState().value(), storage_, - header_repo_, trie_storage_, block_tree_); if (res.has_error()) { diff --git a/core/application/modes/recovery_mode.hpp b/core/application/modes/recovery_mode.hpp index ff00f4b84c..a479475989 100644 --- a/core/application/modes/recovery_mode.hpp +++ b/core/application/modes/recovery_mode.hpp @@ -18,7 +18,6 @@ namespace kagome::application { namespace kagome::blockchain { class BlockStorage; - class BlockHeaderRepository; class BlockTree; } // namespace kagome::blockchain @@ -43,7 +42,6 
@@ namespace kagome::application::mode { const application::AppConfiguration &app_config, std::shared_ptr spaced_storage, std::shared_ptr storage, - std::shared_ptr header_repo, std::shared_ptr trie_storage, std::shared_ptr authority_manager, std::shared_ptr block_tree); @@ -54,7 +52,6 @@ namespace kagome::application::mode { const application::AppConfiguration &app_config_; std::shared_ptr spaced_storage_; std::shared_ptr storage_; - std::shared_ptr header_repo_; std::shared_ptr trie_storage_; std::shared_ptr authority_manager_; std::shared_ptr block_tree_; diff --git a/core/authority_discovery/CMakeLists.txt b/core/authority_discovery/CMakeLists.txt index f4e6477baa..c2dc01f7d4 100644 --- a/core/authority_discovery/CMakeLists.txt +++ b/core/authority_discovery/CMakeLists.txt @@ -15,6 +15,7 @@ add_library(address_publisher target_link_libraries(address_publisher authority_discovery_proto logger + p2p::p2p_kademlia scale_libp2p_types sha ) diff --git a/core/authority_discovery/publisher/address_publisher.cpp b/core/authority_discovery/publisher/address_publisher.cpp index f29fede33e..7ce0c73ba9 100644 --- a/core/authority_discovery/publisher/address_publisher.cpp +++ b/core/authority_discovery/publisher/address_publisher.cpp @@ -117,6 +117,26 @@ namespace kagome::authority_discovery { return outcome::success(); } + OUTCOME_TRY( + raw, + audiEncode(ed_crypto_provider_, + sr_crypto_provider_, + *libp2p_key_, + *libp2p_key_pb_, + peer_info, + *audi_key, + std::chrono::system_clock::now().time_since_epoch())); + return kademlia_->putValue(std::move(raw.first), std::move(raw.second)); + } + + outcome::result> audiEncode( + std::shared_ptr ed_crypto_provider, + std::shared_ptr sr_crypto_provider, + const Ed25519Keypair &libp2p_key, + const ProtobufKey &libp2p_key_pb, + const PeerInfo &peer_info, + const Sr25519Keypair &audi_key, + std::optional now) { std::unordered_set addresses; for (const auto &address : peer_info.addresses) { if (address.getPeerId()) { @@ -133,27 
+153,24 @@ namespace kagome::authority_discovery { for (const auto &address : addresses) { PB_SPAN_ADD(record, addresses, address.getBytesAddress()); } - TimestampScale time{std::chrono::nanoseconds{ - std::chrono::system_clock::now().time_since_epoch()} - .count()}; - PB_SPAN_SET(*record.mutable_creation_time(), - timestamp, - scale::encode(time).value()); + if (now) { + TimestampScale time{now->count()}; + OUTCOME_TRY(encoded_time, scale::encode(time)); + PB_SPAN_SET(*record.mutable_creation_time(), timestamp, encoded_time); + } auto record_pb = pbEncodeVec(record); - OUTCOME_TRY(signature, ed_crypto_provider_->sign(*libp2p_key_, record_pb)); - OUTCOME_TRY(auth_signature, - sr_crypto_provider_->sign(*audi_key, record_pb)); + OUTCOME_TRY(signature, ed_crypto_provider->sign(libp2p_key, record_pb)); + OUTCOME_TRY(auth_signature, sr_crypto_provider->sign(audi_key, record_pb)); ::authority_discovery_v3::SignedAuthorityRecord signed_record; PB_SPAN_SET(signed_record, auth_signature, auth_signature); PB_SPAN_SET(signed_record, record, record_pb); auto &ps = *signed_record.mutable_peer_signature(); PB_SPAN_SET(ps, signature, signature); - PB_SPAN_SET(ps, public_key, libp2p_key_pb_->key); + PB_SPAN_SET(ps, public_key, libp2p_key_pb.key); - auto hash = crypto::sha256(audi_key->public_key); - return kademlia_->putValue({hash.begin(), hash.end()}, - pbEncodeVec(signed_record)); + auto hash = crypto::sha256(audi_key.public_key); + return std::make_pair(Buffer{hash}, Buffer{pbEncodeVec(signed_record)}); } } // namespace kagome::authority_discovery diff --git a/core/authority_discovery/publisher/address_publisher.hpp b/core/authority_discovery/publisher/address_publisher.hpp index 549f60c447..a304991d3f 100644 --- a/core/authority_discovery/publisher/address_publisher.hpp +++ b/core/authority_discovery/publisher/address_publisher.hpp @@ -21,6 +21,22 @@ #include namespace kagome::authority_discovery { + using crypto::Ed25519Keypair; + using crypto::Ed25519Provider; + using 
crypto::Sr25519Keypair; + using crypto::Sr25519Provider; + using libp2p::crypto::ProtobufKey; + using libp2p::peer::PeerInfo; + + outcome::result> audiEncode( + std::shared_ptr ed_crypto_provider, + std::shared_ptr sr_crypto_provider, + const Ed25519Keypair &libp2p_key, + const ProtobufKey &libp2p_key_pb, + const PeerInfo &peer_info, + const Sr25519Keypair &audi_key, + std::optional now); + /** * Publishes listening addresses for authority discovery. * Authority discovery public key is used for Kademlia DHT key. diff --git a/core/authority_discovery/query/audi_store_impl.hpp b/core/authority_discovery/query/audi_store_impl.hpp index 381e8e3e35..e13c64a126 100644 --- a/core/authority_discovery/query/audi_store_impl.hpp +++ b/core/authority_discovery/query/audi_store_impl.hpp @@ -39,7 +39,7 @@ namespace kagome::authority_discovery { const AuthorityPeerInfo &)> f) override; private: - std::shared_ptr space_; + std::shared_ptr space_; log::Logger log_; }; diff --git a/core/authority_discovery/query/query_impl.cpp b/core/authority_discovery/query/query_impl.cpp index 50b9703c78..8e04c1b40b 100644 --- a/core/authority_discovery/query/query_impl.cpp +++ b/core/authority_discovery/query/query_impl.cpp @@ -39,7 +39,7 @@ namespace kagome::authority_discovery { std::shared_ptr app_state_manager, std::shared_ptr block_tree, std::shared_ptr authority_discovery_api, - LazySPtr validation_protocol, + LazySPtr validation_protocol, std::shared_ptr key_store, std::shared_ptr audi_store, std::shared_ptr sr_crypto_provider, @@ -344,7 +344,7 @@ namespace kagome::authority_discovery { authority, AuthorityPeerInfo{ .raw = std::move(signed_record_pb), - .time = time.has_value() ? std::make_optional(*time) + .time = time.has_value() ? 
std::make_optional(TimestampScale{*time}) : std::nullopt, .peer = peer, }); diff --git a/core/authority_discovery/query/query_impl.hpp b/core/authority_discovery/query/query_impl.hpp index 896d5b4e16..a00e7d525c 100644 --- a/core/authority_discovery/query/query_impl.hpp +++ b/core/authority_discovery/query/query_impl.hpp @@ -27,7 +27,7 @@ #include namespace kagome::network { - class ValidationProtocol; + class ValidationProtocolReserve; } // namespace kagome::network namespace kagome::authority_discovery { @@ -47,7 +47,7 @@ namespace kagome::authority_discovery { std::shared_ptr app_state_manager, std::shared_ptr block_tree, std::shared_ptr authority_discovery_api, - LazySPtr validation_protocol, + LazySPtr validation_protocol, std::shared_ptr key_store, std::shared_ptr audi_store, std::shared_ptr sr_crypto_provider, @@ -85,7 +85,7 @@ namespace kagome::authority_discovery { std::shared_ptr block_tree_; std::shared_ptr authority_discovery_api_; - LazySPtr validation_protocol_; + LazySPtr validation_protocol_; std::shared_ptr key_store_; std::shared_ptr audi_store_; std::shared_ptr sr_crypto_provider_; diff --git a/core/blockchain/CMakeLists.txt b/core/blockchain/CMakeLists.txt index 572ca9875e..eb2afb2da3 100644 --- a/core/blockchain/CMakeLists.txt +++ b/core/blockchain/CMakeLists.txt @@ -12,7 +12,6 @@ add_library(blockchain impl/block_storage_error.cpp impl/justification_storage_policy.cpp impl/block_storage_impl.cpp - impl/block_header_repository_impl.cpp genesis_block_hash.cpp ) target_link_libraries(blockchain diff --git a/core/blockchain/block_header_repository.hpp b/core/blockchain/block_header_repository.hpp index a18e6c73cf..13550ef2e0 100644 --- a/core/blockchain/block_header_repository.hpp +++ b/core/blockchain/block_header_repository.hpp @@ -55,11 +55,12 @@ namespace kagome::blockchain { const primitives::BlockHash &block_hash) const = 0; /** - * @return status of a block with corresponding {@param block_hash} or a - * storage error + * @return block 
header with corresponding {@param block_hash} or a none + * optional if the corresponding block header is not in storage or a storage + * error */ - virtual outcome::result getBlockStatus( - const primitives::BlockHash &block_hash) const = 0; + virtual outcome::result> + tryGetBlockHeader(const primitives::BlockHash &block_hash) const = 0; /** * @param id of a block which number is returned diff --git a/core/blockchain/block_storage.hpp b/core/blockchain/block_storage.hpp index e60754f460..5c0b447473 100644 --- a/core/blockchain/block_storage.hpp +++ b/core/blockchain/block_storage.hpp @@ -93,8 +93,17 @@ namespace kagome::blockchain { * Tries to get block header by {@param block_hash} * @returns block header or error */ + virtual outcome::result getBlockHeader( + const primitives::BlockHash &block_hash) const = 0; + + /** + * Attempts to retrieve the block header for the given {@param block_hash}. + * @param block_hash The hash of the block whose header is to be retrieved. + * @returns An optional containing the block header if found, std::nullopt + * if not found, or an error if the operation fails. 
+ */ virtual outcome::result> - getBlockHeader(const primitives::BlockHash &block_hash) const = 0; + tryGetBlockHeader(const primitives::BlockHash &block_hash) const = 0; // -- body -- diff --git a/core/blockchain/block_tree.hpp b/core/blockchain/block_tree.hpp index 70e8d07808..53569a9105 100644 --- a/core/blockchain/block_tree.hpp +++ b/core/blockchain/block_tree.hpp @@ -10,6 +10,7 @@ #include #include +#include "blockchain/block_header_repository.hpp" #include "consensus/timeline/types.hpp" #include "outcome/outcome.hpp" #include "primitives/block.hpp" @@ -27,7 +28,7 @@ namespace kagome::blockchain { * production (handling forks, pruning the blocks, resolving child-parent * relations, etc) */ - class BlockTree { + class BlockTree : public BlockHeaderRepository { public: using BlockHashVecRes = outcome::result>; @@ -53,14 +54,6 @@ namespace kagome::blockchain { */ virtual bool has(const primitives::BlockHash &hash) const = 0; - /** - * Get block header by provided block id - * @param block_hash hash of the block header we are looking for - * @return result containing block header if it exists, error otherwise - */ - virtual outcome::result getBlockHeader( - const primitives::BlockHash &block_hash) const = 0; - /** * Get a body (extrinsics) of the block (if present) * @param block_hash hash of the block to get body for diff --git a/core/blockchain/impl/block_header_repository_impl.cpp b/core/blockchain/impl/block_header_repository_impl.cpp deleted file mode 100644 index 11afe95ed0..0000000000 --- a/core/blockchain/impl/block_header_repository_impl.cpp +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright Quadrivium LLC - * All Rights Reserved - * SPDX-License-Identifier: Apache-2.0 - */ - -#include "blockchain/impl/block_header_repository_impl.hpp" - -#include - -#include "blockchain/block_tree_error.hpp" -#include "blockchain/impl/storage_util.hpp" -#include "scale/scale.hpp" - -using kagome::primitives::BlockHash; -using kagome::primitives::BlockNumber; -using 
kagome::storage::Space; - -namespace kagome::blockchain { - - BlockHeaderRepositoryImpl::BlockHeaderRepositoryImpl( - std::shared_ptr storage, - std::shared_ptr hasher) - : storage_{std::move(storage)}, hasher_{std::move(hasher)} { - BOOST_ASSERT(hasher_); - } - - outcome::result BlockHeaderRepositoryImpl::getNumberByHash( - const primitives::BlockHash &hash) const { - OUTCOME_TRY(header, getBlockHeader(hash)); - return header.number; - } - - outcome::result - BlockHeaderRepositoryImpl::getHashByNumber( - primitives::BlockNumber number) const { - auto num_to_idx_key = blockNumberToKey(number); - auto key_space = storage_->getSpace(Space::kLookupKey); - auto res = key_space->get(num_to_idx_key); - if (not res.has_value()) { - return BlockTreeError::HEADER_NOT_FOUND; - } - return primitives::BlockHash::fromSpan(res.value()); - } - - outcome::result - BlockHeaderRepositoryImpl::getBlockHeader( - const primitives::BlockHash &block_hash) const { - OUTCOME_TRY(header_opt, - getFromSpace(*storage_, Space::kHeader, block_hash)); - if (header_opt.has_value()) { - OUTCOME_TRY(header, - scale::decode(header_opt.value())); - header.hash_opt.emplace(block_hash); - return header; - } - return BlockTreeError::HEADER_NOT_FOUND; - } - - outcome::result BlockHeaderRepositoryImpl::getBlockStatus( - const primitives::BlockHash &block_hash) const { - return getBlockHeader(block_hash).has_value() ? 
BlockStatus::InChain - : BlockStatus::Unknown; - } - -} // namespace kagome::blockchain diff --git a/core/blockchain/impl/block_header_repository_impl.hpp b/core/blockchain/impl/block_header_repository_impl.hpp deleted file mode 100644 index a0b2d8eb06..0000000000 --- a/core/blockchain/impl/block_header_repository_impl.hpp +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright Quadrivium LLC - * All Rights Reserved - * SPDX-License-Identifier: Apache-2.0 - */ - -#pragma once - -#include "blockchain/block_header_repository.hpp" - -#include "crypto/hasher.hpp" -#include "storage/spaced_storage.hpp" - -namespace kagome::blockchain { - - class BlockHeaderRepositoryImpl : public BlockHeaderRepository { - public: - BlockHeaderRepositoryImpl(std::shared_ptr storage, - std::shared_ptr hasher); - - ~BlockHeaderRepositoryImpl() override = default; - - outcome::result getNumberByHash( - const primitives::BlockHash &block_hash) const override; - - outcome::result getHashByNumber( - primitives::BlockNumber block_number) const override; - - outcome::result getBlockHeader( - const primitives::BlockHash &block_hash) const override; - - outcome::result getBlockStatus( - const primitives::BlockHash &block_hash) const override; - - private: - std::shared_ptr storage_; - std::shared_ptr hasher_; - }; - -} // namespace kagome::blockchain diff --git a/core/blockchain/impl/block_storage_impl.cpp b/core/blockchain/impl/block_storage_impl.cpp index 12c00ff4ce..63a34460b6 100644 --- a/core/blockchain/impl/block_storage_impl.cpp +++ b/core/blockchain/impl/block_storage_impl.cpp @@ -118,26 +118,19 @@ namespace kagome::blockchain { if (j_opt.has_value()) { break; } - OUTCOME_TRY(header_opt, getBlockHeader(current_hash)); - if (header_opt.has_value()) { - auto header = header_opt.value(); - if (header.number == 0) { - SL_TRACE(logger_, - "Not found block with justification. 
" - "Genesis block will be used as last finalized ({})", - current_hash); - return {0, current_hash}; // genesis - } - current_hash = header.parent_hash; - } else { - SL_ERROR( - logger_, "Failed to fetch header for block ({})", current_hash); - return BlockStorageError::HEADER_NOT_FOUND; + OUTCOME_TRY(header, getBlockHeader(current_hash)); + if (header.number == 0) { + SL_TRACE(logger_, + "Not found block with justification. " + "Genesis block will be used as last finalized ({})", + current_hash); + return {0, current_hash}; // genesis } + current_hash = header.parent_hash; } OUTCOME_TRY(header, getBlockHeader(current_hash)); - primitives::BlockInfo found_block{header.value().number, current_hash}; + primitives::BlockInfo found_block{header.number, current_hash}; SL_TRACE(logger_, "Justification is found in block {}. " "This block will be used as last finalized", @@ -206,19 +199,19 @@ namespace kagome::blockchain { return block_hash; } - outcome::result> - BlockStorageImpl::getBlockHeader( + outcome::result BlockStorageImpl::getBlockHeader( const primitives::BlockHash &block_hash) const { - OUTCOME_TRY(encoded_header_opt, - getFromSpace(*storage_, Space::kHeader, block_hash)); - if (encoded_header_opt.has_value()) { - OUTCOME_TRY( - header, - scale::decode(encoded_header_opt.value())); - header.hash_opt.emplace(block_hash); - return header; + OUTCOME_TRY(header_opt, fetchBlockHeader(block_hash)); + if (header_opt.has_value()) { + return header_opt.value(); } - return std::nullopt; + return BlockStorageError::HEADER_NOT_FOUND; + } + + outcome::result> + BlockStorageImpl::tryGetBlockHeader( + const primitives::BlockHash &block_hash) const { + return fetchBlockHeader(block_hash); } outcome::result BlockStorageImpl::putBlockBody( @@ -309,11 +302,7 @@ namespace kagome::blockchain { primitives::BlockData block_data{.hash = block_hash}; // Block header - OUTCOME_TRY(header_opt, getBlockHeader(block_hash)); - if (not header_opt.has_value()) { - return std::nullopt; - } - 
auto &header = header_opt.value(); + OUTCOME_TRY(header, getBlockHeader(block_hash)); block_data.header = std::move(header); // Block body @@ -332,8 +321,8 @@ namespace kagome::blockchain { outcome::result BlockStorageImpl::removeBlock( const primitives::BlockHash &block_hash) { // Check if block still in storage - OUTCOME_TRY(header_opt, getBlockHeader(block_hash)); - if (not header_opt.has_value()) { + OUTCOME_TRY(header_opt, fetchBlockHeader(block_hash)); + if (not header_opt) { return outcome::success(); } const auto &header = header_opt.value(); @@ -397,4 +386,18 @@ namespace kagome::blockchain { return outcome::success(); } + outcome::result> + BlockStorageImpl::fetchBlockHeader( + const primitives::BlockHash &block_hash) const { + OUTCOME_TRY(encoded_header_opt, + getFromSpace(*storage_, Space::kHeader, block_hash)); + if (encoded_header_opt.has_value()) { + OUTCOME_TRY( + header, + scale::decode(encoded_header_opt.value())); + header.hash_opt.emplace(block_hash); + return std::make_optional(std::move(header)); + } + return std::nullopt; + } } // namespace kagome::blockchain diff --git a/core/blockchain/impl/block_storage_impl.hpp b/core/blockchain/impl/block_storage_impl.hpp index 927aabeadd..3f229dc713 100644 --- a/core/blockchain/impl/block_storage_impl.hpp +++ b/core/blockchain/impl/block_storage_impl.hpp @@ -60,7 +60,10 @@ namespace kagome::blockchain { outcome::result putBlockHeader( const primitives::BlockHeader &header) override; - outcome::result> getBlockHeader( + outcome::result getBlockHeader( + const primitives::BlockHash &block_hash) const override; + + outcome::result> tryGetBlockHeader( const primitives::BlockHash &block_hash) const override; // -- body -- @@ -102,6 +105,9 @@ namespace kagome::blockchain { BlockStorageImpl(std::shared_ptr storage, std::shared_ptr hasher); + outcome::result> fetchBlockHeader( + const primitives::BlockHash &block_hash) const; + std::shared_ptr storage_; std::shared_ptr hasher_; diff --git 
a/core/blockchain/impl/block_tree_impl.cpp b/core/blockchain/impl/block_tree_impl.cpp index 5b32499069..115d556a1a 100644 --- a/core/blockchain/impl/block_tree_impl.cpp +++ b/core/blockchain/impl/block_tree_impl.cpp @@ -13,6 +13,7 @@ #include "blockchain/block_tree_error.hpp" #include "blockchain/impl/cached_tree.hpp" #include "blockchain/impl/justification_storage_policy.hpp" +#include "blockchain/impl/storage_util.hpp" #include "common/main_thread_pool.hpp" #include "consensus/babe/impl/babe_digests_util.hpp" #include "consensus/babe/is_primary.hpp" @@ -35,11 +36,8 @@ namespace kagome::blockchain { namespace { /// Function-helper for loading (and repair if it needed) of leaves outcome::result> loadLeaves( - const std::shared_ptr &storage, - const std::shared_ptr &header_repo, - const log::Logger &log) { + const std::shared_ptr &storage, const log::Logger &log) { BOOST_ASSERT(storage != nullptr); - BOOST_ASSERT(header_repo != nullptr); std::set block_tree_leaves; { @@ -49,16 +47,17 @@ namespace kagome::blockchain { block_tree_unordered_leaves.size()); for (auto &hash : block_tree_unordered_leaves) { - auto res = header_repo->getNumberById(hash); - if (res.has_error()) { - if (res == outcome::failure(BlockTreeError::HEADER_NOT_FOUND)) { + // get block number by hash + const auto header = storage->getBlockHeader(hash); + if (not header) { + if (header == outcome::failure(BlockTreeError::HEADER_NOT_FOUND)) { SL_TRACE(log, "Leaf {} not found", hash); continue; } - SL_ERROR(log, "Leaf {} is corrupted: {}", hash, res.error()); - return res.as_failure(); + SL_ERROR(log, "Leaf {} is corrupted: {}", hash, header.error()); + return header.as_failure(); } - auto number = res.value(); + auto number = header.value().number; SL_TRACE(log, "Leaf {} found", primitives::BlockInfo(number, hash)); block_tree_leaves.emplace(number, hash); } @@ -95,7 +94,8 @@ namespace kagome::blockchain { } } - OUTCOME_TRY(hash, header_repo->getHashById(number)); + OUTCOME_TRY(hash_opt_res,
storage->getBlockHash(number)); + primitives::BlockHash hash = hash_opt_res.value(); block_tree_leaves.emplace(number, hash); if (auto res = storage->setBlockTreeLeaves({hash}); res.has_error()) { @@ -114,9 +114,7 @@ namespace kagome::blockchain { outcome::result> BlockTreeImpl::create( const application::AppConfiguration &app_config, - std::shared_ptr header_repo, std::shared_ptr storage, - std::shared_ptr extrinsic_observer, std::shared_ptr hasher, primitives::events::ChainSubscriptionEnginePtr chain_events_engine, primitives::events::ExtrinsicSubscriptionEnginePtr @@ -128,7 +126,6 @@ namespace kagome::blockchain { std::shared_ptr state_pruner, common::MainThreadPool &main_thread_pool) { BOOST_ASSERT(storage != nullptr); - BOOST_ASSERT(header_repo != nullptr); log::Logger log = log::createLogger("BlockTree", "block_tree"); @@ -136,10 +133,9 @@ namespace kagome::blockchain { auto finalized_block_header_res = storage->getBlockHeader(last_finalized_block_info.hash); - BOOST_ASSERT_MSG(finalized_block_header_res.has_value() - and finalized_block_header_res.value().has_value(), + BOOST_ASSERT_MSG(finalized_block_header_res.has_value(), "Initialized block tree must be have finalized block"); - auto &finalized_block_header = finalized_block_header_res.value().value(); + auto &finalized_block_header = finalized_block_header_res.value(); // call chain_events_engine->notify to init babe_config_repo preventive chain_events_engine->notify( primitives::events::ChainEventType::kFinalizedHeads, @@ -147,7 +143,7 @@ namespace kagome::blockchain { OUTCOME_TRY(storage->getJustification(last_finalized_block_info.hash)); - OUTCOME_TRY(block_tree_leaves, loadLeaves(storage, header_repo, log)); + OUTCOME_TRY(block_tree_leaves, loadLeaves(storage, log)); BOOST_ASSERT_MSG(not block_tree_leaves.empty(), "Must be known or calculated at least one leaf"); @@ -196,16 +192,16 @@ namespace kagome::blockchain { dead.emplace(fork); auto f_res = storage->getBlockHeader(fork.hash); - if 
(f_res.has_error() or not f_res.value().has_value()) { + if (f_res.has_error()) { break; } - const auto &fork_header = f_res.value().value(); + const auto &fork_header = f_res.value(); auto m_res = storage->getBlockHeader(main.hash); - if (m_res.has_error() or not m_res.value().has_value()) { + if (m_res.has_error()) { break; } - const auto &main_header = m_res.value().value(); + const auto &main_header = m_res.value(); BOOST_ASSERT(fork_header.number == main_header.number); if (fork_header.parent_hash == main_header.parent_hash) { @@ -230,19 +226,9 @@ namespace kagome::blockchain { return header_res.as_failure(); } - auto &header_opt = header_res.value(); - if (not header_opt.has_value()) { - SL_WARN(log, - "Can't get header of existing block {}: " - "not found in block storage", - block); - dead.insert(subchain.begin(), subchain.end()); - break; - } - observed.emplace(block.hash); - auto &header = header_opt.value(); + auto &header = header_res.value(); if (header.number < last_finalized_block_info.number) { SL_WARN( log, @@ -275,10 +261,8 @@ namespace kagome::blockchain { std::shared_ptr block_tree( new BlockTreeImpl(app_config, - std::move(header_repo), std::move(storage), last_finalized_block_info, - std::move(extrinsic_observer), std::move(hasher), std::move(chain_events_engine), std::move(extrinsic_events_engine), @@ -311,16 +295,14 @@ namespace kagome::blockchain { outcome::result BlockTreeImpl::recover( const primitives::BlockId &target_block_id, std::shared_ptr storage, - std::shared_ptr header_repo, std::shared_ptr trie_storage, std::shared_ptr block_tree) { BOOST_ASSERT(storage != nullptr); - BOOST_ASSERT(header_repo != nullptr); BOOST_ASSERT(trie_storage != nullptr); log::Logger log = log::createLogger("BlockTree", "block_tree"); - OUTCOME_TRY(block_tree_leaves, loadLeaves(storage, header_repo, log)); + OUTCOME_TRY(block_tree_leaves, loadLeaves(storage, log)); BOOST_ASSERT_MSG(not block_tree_leaves.empty(), "Must be known or calculated at least one 
leaf"); @@ -339,20 +321,15 @@ namespace kagome::blockchain { const auto &target_block_hash = target_block_hash_opt_res.value().value(); // Check if target block exists - auto target_block_header_opt_res = - storage->getBlockHeader(target_block_hash); - if (target_block_header_opt_res.has_error()) { + auto target_block_header_res = storage->getBlockHeader(target_block_hash); + if (target_block_header_res.has_error()) { SL_CRITICAL(log, "Can't get header of target block: {}", - target_block_header_opt_res.error()); - return target_block_header_opt_res.as_failure(); - } - const auto &target_block_header_opt = target_block_header_opt_res.value(); - if (not target_block_header_opt.has_value()) { - return BlockTreeError::HEADER_NOT_FOUND; + target_block_header_res.error()); + return target_block_header_res.as_failure(); } - const auto &target_block_header = target_block_header_opt.value(); + const auto &target_block_header = target_block_header_res.value(); const auto &state_root = target_block_header.state_root; // Check if target block has state @@ -371,19 +348,15 @@ namespace kagome::blockchain { break; } - auto header_opt_res = storage->getBlockHeader(block.hash); - if (header_opt_res.has_error()) { + auto header_res = storage->getBlockHeader(block.hash); + if (header_res.has_error()) { SL_CRITICAL(log, "Can't get header of one of removing block: {}", - header_opt_res.error()); - return header_opt_res.as_failure(); - } - const auto &header_opt = header_opt_res.value(); - if (not header_opt.has_value()) { - return BlockTreeError::HEADER_NOT_FOUND; + header_res.error()); + return header_res.as_failure(); } - const auto &header = header_opt.value(); + const auto &header = header_res.value(); block_tree_leaves.emplace(*header.parentInfo()); block_tree_leaves.erase(block); @@ -410,10 +383,8 @@ namespace kagome::blockchain { BlockTreeImpl::BlockTreeImpl( const application::AppConfiguration &app_config, - std::shared_ptr header_repo, std::shared_ptr storage, const 
primitives::BlockInfo &finalized, - std::shared_ptr extrinsic_observer, std::shared_ptr hasher, primitives::events::ChainSubscriptionEnginePtr chain_events_engine, primitives::events::ExtrinsicSubscriptionEnginePtr @@ -425,11 +396,9 @@ namespace kagome::blockchain { std::shared_ptr state_pruner, common::MainThreadPool &main_thread_pool) : block_tree_data_{BlockTreeData{ - .header_repo_ = std::move(header_repo), .storage_ = std::move(storage), .state_pruner_ = std::move(state_pruner), .tree_ = std::make_unique(finalized), - .extrinsic_observer_ = std::move(extrinsic_observer), .hasher_ = std::move(hasher), .extrinsic_event_key_repo_ = std::move(extrinsic_event_key_repo), .justification_storage_policy_ = @@ -441,10 +410,8 @@ namespace kagome::blockchain { main_pool_handler_{main_thread_pool.handlerStarted()}, extrinsic_events_engine_{std::move(extrinsic_events_engine)} { block_tree_data_.sharedAccess([&](const BlockTreeData &p) { - BOOST_ASSERT(p.header_repo_ != nullptr); BOOST_ASSERT(p.storage_ != nullptr); BOOST_ASSERT(p.tree_ != nullptr); - BOOST_ASSERT(p.extrinsic_observer_ != nullptr); BOOST_ASSERT(p.hasher_ != nullptr); BOOST_ASSERT(p.extrinsic_event_key_repo_ != nullptr); BOOST_ASSERT(p.justification_storage_policy_ != nullptr); @@ -493,7 +460,7 @@ namespace kagome::blockchain { return p.genesis_block_hash_.value(); } - auto res = p.header_repo_->getHashByNumber(0); + auto res = p.storage_->getBlockHash(0); BOOST_ASSERT_MSG( res.has_value(), "Block tree must contain at least genesis block"); @@ -501,7 +468,7 @@ namespace kagome::blockchain { // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) const_cast &>( p.genesis_block_hash_) - .emplace(res.value()); + .emplace(res.value().value()); return p.genesis_block_hash_.value(); }) .get(); @@ -713,12 +680,7 @@ namespace kagome::blockchain { auto finalized = getLastFinalizedNoLock(p).number; for (auto hash = block_header.parent_hash;;) { - OUTCOME_TRY(header_opt, p.storage_->getBlockHeader(hash)); - if (not 
header_opt.has_value()) { - return BlockTreeError::NO_PARENT; - } - - auto &header = header_opt.value(); + OUTCOME_TRY(header, p.storage_->getBlockHeader(hash)); SL_TRACE(log_, "Block {} has found in storage and enqueued to add", primitives::BlockInfo(header.number, hash)); @@ -806,11 +768,7 @@ namespace kagome::blockchain { if (node) { SL_DEBUG(log_, "Finalizing block {}", node->info); - OUTCOME_TRY(header_opt, p.storage_->getBlockHeader(block_hash)); - if (not header_opt.has_value()) { - return BlockTreeError::HEADER_NOT_FOUND; - } - auto &header = header_opt.value(); + OUTCOME_TRY(header, p.storage_->getBlockHeader(block_hash)); OUTCOME_TRY(p.storage_->putJustification(justification, block_hash)); @@ -870,9 +828,8 @@ namespace kagome::blockchain { // we store justification for last finalized block only as long as it is // last finalized (if it doesn't meet other justification storage rules, // e.g. its number a multiple of 512) - OUTCOME_TRY( - last_finalized_header, - p.header_repo_->getBlockHeader(last_finalized_block_info.hash)); + OUTCOME_TRY(last_finalized_header, + p.storage_->getBlockHeader(last_finalized_block_info.hash)); OUTCOME_TRY( shouldStoreLastFinalized, p.justification_storage_policy_->shouldStoreFor( @@ -903,11 +860,12 @@ namespace kagome::blockchain { OUTCOME_TRY(p.storage_->removeBlockBody(*hash)); } } else { - OUTCOME_TRY(header, p.header_repo_->getBlockHeader(block_hash)); - if (header.number >= last_finalized_block_info.number) { + OUTCOME_TRY(header, p.storage_->getBlockHeader(block_hash)); + const auto header_number = header.number; + if (header_number >= last_finalized_block_info.number) { return BlockTreeError::NON_FINALIZED_BLOCK_NOT_FOUND; } - OUTCOME_TRY(canon_hash, p.header_repo_->getHashByNumber(header.number)); + OUTCOME_TRY(canon_hash, p.storage_->getBlockHash(header_number)); if (block_hash != canon_hash) { return BlockTreeError::BLOCK_ON_DEAD_END; } @@ -946,11 +904,7 @@ namespace kagome::blockchain { outcome::result 
BlockTreeImpl::getBlockHeaderNoLock( const BlockTreeData &p, const primitives::BlockHash &block_hash) const { - OUTCOME_TRY(header, p.storage_->getBlockHeader(block_hash)); - if (header.has_value()) { - return header.value(); - } - return BlockTreeError::HEADER_NOT_FOUND; + return p.storage_->getBlockHeader(block_hash); } outcome::result BlockTreeImpl::getBlockHeader( @@ -962,6 +916,24 @@ namespace kagome::blockchain { }); } + outcome::result> + BlockTreeImpl::tryGetBlockHeader( + const primitives::BlockHash &block_hash) const { + return block_tree_data_.sharedAccess( + [&](const BlockTreeData &p) + -> outcome::result> { + auto header = p.storage_->getBlockHeader(block_hash); + if (header) { + return header.value(); + } + const auto &header_error = header.error(); + if (header_error == BlockTreeError::HEADER_NOT_FOUND) { + return std::nullopt; + } + return header_error; + }); + } + outcome::result BlockTreeImpl::getBlockBody( const primitives::BlockHash &block_hash) const { return block_tree_data_.sharedAccess( @@ -992,13 +964,13 @@ namespace kagome::blockchain { const primitives::BlockHash &block, uint64_t maximum) const { return block_tree_data_.sharedAccess([&](const BlockTreeData &p) -> BlockTree::BlockHashVecRes { - auto block_number_res = p.header_repo_->getNumberByHash(block); - if (block_number_res.has_error()) { + auto block_header_res = p.storage_->getBlockHeader(block); + if (block_header_res.has_error()) { log_->error( - "cannot retrieve block {}: {}", block, block_number_res.error()); + "cannot retrieve block {}: {}", block, block_header_res.error()); return BlockTreeError::HEADER_NOT_FOUND; } - auto start_block_number = block_number_res.value(); + auto start_block_number = block_header_res.value().number; if (maximum == 1) { return std::vector{block}; @@ -1017,14 +989,14 @@ namespace kagome::blockchain { start_block_number + count - 1; auto finish_block_hash_res = - p.header_repo_->getHashByNumber(finish_block_number); + 
p.storage_->getBlockHash(finish_block_number); if (finish_block_hash_res.has_error()) { log_->error("cannot retrieve block with number {}: {}", finish_block_number, finish_block_hash_res.error()); return BlockTreeError::HEADER_NOT_FOUND; } - const auto &finish_block_hash = finish_block_hash_res.value(); + const auto &finish_block_hash = finish_block_hash_res.value().value(); OUTCOME_TRY(chain, getDescendingChainToBlockNoLock(p, finish_block_hash, count)); @@ -1059,7 +1031,7 @@ namespace kagome::blockchain { } while (maximum > chain.size()) { - auto header_res = p.header_repo_->getBlockHeader(hash); + auto header_res = p.storage_->getBlockHeader(hash); if (header_res.has_error()) { if (chain.empty()) { log_->error("Cannot retrieve block with hash {}: {}", @@ -1092,8 +1064,10 @@ namespace kagome::blockchain { const primitives::BlockHash &descendant) const { return block_tree_data_.sharedAccess( [&](const BlockTreeData &p) -> BlockTreeImpl::BlockHashVecRes { - OUTCOME_TRY(from, p.header_repo_->getNumberByHash(ancestor)); - OUTCOME_TRY(to, p.header_repo_->getNumberByHash(descendant)); + OUTCOME_TRY(from_header, p.storage_->getBlockHeader(ancestor)); + auto from = from_header.number; + OUTCOME_TRY(to_header, p.storage_->getBlockHeader(descendant)); + auto to = to_header.number; if (to < from) { return BlockTreeError::TARGET_IS_PAST_MAX; } @@ -1136,20 +1110,20 @@ namespace kagome::blockchain { if (ancestor_node_ptr) { ancestor_depth = ancestor_node_ptr->info.number; } else { - auto number_res = p.header_repo_->getNumberByHash(ancestor); - if (!number_res) { + auto header_res = p.storage_->getBlockHeader(ancestor); + if (!header_res) { return false; } - ancestor_depth = number_res.value(); + ancestor_depth = header_res.value().number; } if (descendant_node_ptr) { descendant_depth = descendant_node_ptr->info.number; } else { - auto number_res = p.header_repo_->getNumberByHash(descendant); - if (!number_res) { + auto header_res = p.storage_->getBlockHeader(descendant); + if 
(!header_res) { return false; } - descendant_depth = number_res.value(); + descendant_depth = header_res.value().number; } if (descendant_depth < ancestor_depth) { SL_DEBUG(log_, @@ -1164,7 +1138,9 @@ namespace kagome::blockchain { auto finalized = [&](const primitives::BlockHash &hash, primitives::BlockNumber number) { return number <= getLastFinalizedNoLock(p).number - and p.header_repo_->getHashByNumber(number) == outcome::success(hash); + and p.storage_->getBlockHash(number) + == outcome::success( + std::optional(hash)); }; if (descendant_node_ptr or finalized(descendant, descendant_depth)) { return finalized(ancestor, ancestor_depth); @@ -1173,7 +1149,7 @@ namespace kagome::blockchain { auto current_hash = descendant; KAGOME_PROFILE_START(search_finalized_chain) while (current_hash != ancestor) { - auto current_header_res = p.header_repo_->getBlockHeader(current_hash); + auto current_header_res = p.storage_->getBlockHeader(current_hash); if (!current_header_res) { return false; } @@ -1197,8 +1173,9 @@ namespace kagome::blockchain { bool BlockTreeImpl::isFinalized(const primitives::BlockInfo &block) const { return block_tree_data_.sharedAccess([&](const BlockTreeData &p) { return block.number <= getLastFinalizedNoLock(p).number - and p.header_repo_->getHashByNumber(block.number) - == outcome::success(block.hash); + and p.storage_->getBlockHash(block.number) + == outcome::success( + std::optional(block.hash)); }); } @@ -1225,11 +1202,12 @@ namespace kagome::blockchain { // If target has not found in block tree (in memory), // it means block finalized or discarded if (not target) { - OUTCOME_TRY(target_number, - p.header_repo_->getNumberByHash(target_hash)); + OUTCOME_TRY(target_header, p.storage_->getBlockHeader(target_hash)); + auto target_number = target_header.number; - OUTCOME_TRY(canon_hash, - p.header_repo_->getHashByNumber(target_number)); + OUTCOME_TRY(canon_hash_res, + p.storage_->getBlockHash(target_number)); + auto canon_hash = canon_hash_res.value(); 
if (canon_hash != target_hash) { return BlockTreeError::BLOCK_ON_DEAD_END; @@ -1259,26 +1237,23 @@ namespace kagome::blockchain { BlockTreeImpl::BlockHashVecRes BlockTreeImpl::getChildren( const primitives::BlockHash &block) const { - return block_tree_data_.sharedAccess([&](const BlockTreeData &p) - -> BlockTreeImpl::BlockHashVecRes { - if (auto node = p.tree_->find(block); node != nullptr) { - std::vector result; - result.reserve(node->children.size()); - for (const auto &child : node->children) { - result.push_back(child->info.hash); - } - return result; - } - OUTCOME_TRY(header, p.storage_->getBlockHeader(block)); - if (!header.has_value()) { - return BlockTreeError::HEADER_NOT_FOUND; - } - // if node is not in tree_ it must be finalized and thus have only one - // child - OUTCOME_TRY(child_hash, - p.header_repo_->getHashByNumber(header.value().number + 1)); - return outcome::success(std::vector{child_hash}); - }); + return block_tree_data_.sharedAccess( + [&](const BlockTreeData &p) -> BlockTreeImpl::BlockHashVecRes { + if (auto node = p.tree_->find(block); node != nullptr) { + std::vector result; + result.reserve(node->children.size()); + for (const auto &child : node->children) { + result.push_back(child->info.hash); + } + return result; + } + OUTCOME_TRY(header, p.storage_->getBlockHeader(block)); + // if node is not in tree_ it must be finalized and thus have only one + // child + OUTCOME_TRY(child_hash, p.storage_->getBlockHash(header.number + 1)); + return outcome::success( + std::vector{child_hash.value()}); + }); } primitives::BlockInfo BlockTreeImpl::getLastFinalizedNoLock( @@ -1308,9 +1283,6 @@ namespace kagome::blockchain { metric_best_block_height_->set(changes.reorg->common.number); } } - for (auto &block : changes.prune) { - OUTCOME_TRY(p.storage_->removeBlock(block.hash)); - } std::vector extrinsics; std::vector @@ -1319,7 +1291,7 @@ namespace kagome::blockchain { // remove from storage retired_hashes.reserve(changes.prune.size()); for (const 
auto &block : changes.prune) { - OUTCOME_TRY(block_header_opt, p.storage_->getBlockHeader(block.hash)); + OUTCOME_TRY(block_header, p.storage_->getBlockHeader(block.hash)); OUTCOME_TRY(block_body_opt, p.storage_->getBlockBody(block.hash)); if (block_body_opt.has_value()) { extrinsics.reserve(extrinsics.size() + block_body_opt.value().size()); @@ -1339,8 +1311,7 @@ namespace kagome::blockchain { } extrinsics.emplace_back(std::move(ext)); } - BOOST_ASSERT(block_header_opt.has_value()); - OUTCOME_TRY(p.state_pruner_->pruneDiscarded(block_header_opt.value())); + OUTCOME_TRY(p.state_pruner_->pruneDiscarded(block_header)); } retired_hashes.emplace_back( primitives::events::RemoveAfterFinalizationParams::HeaderInfo{ @@ -1348,34 +1319,6 @@ namespace kagome::blockchain { OUTCOME_TRY(p.storage_->removeBlock(block.hash)); } - // trying to return extrinsics back to transaction pool - main_pool_handler_->execute( - [extrinsics{std::move(extrinsics)}, - wself{weak_from_this()}, - retired{primitives::events::RemoveAfterFinalizationParams{ - .removed = std::move(retired_hashes), - .finalized = getLastFinalizedNoLock(p).number}}]() mutable { - if (auto self = wself.lock()) { - auto eo = self->block_tree_data_.sharedAccess( - [&](const BlockTreeData &p) { return p.extrinsic_observer_; }); - - for (auto &&extrinsic : extrinsics) { - auto result = eo->onTxMessage(extrinsic); - if (result) { - SL_DEBUG( - self->log_, "Tx {} was reapplied", result.value().toHex()); - } else { - SL_DEBUG(self->log_, "Tx was skipped: {}", result.error()); - } - } - - self->chain_events_engine_->notify( - primitives::events::ChainEventType:: - kDeactivateAfterFinalization, - retired); - } - }); - return outcome::success(); } @@ -1446,6 +1389,22 @@ namespace kagome::blockchain { }); } + // BlockHeaderRepository methods + outcome::result BlockTreeImpl::getNumberByHash( + const primitives::BlockHash &hash) const { + OUTCOME_TRY(header, getBlockHeader(hash)); + return header.number; + } + + outcome::result 
BlockTreeImpl::getHashByNumber( + primitives::BlockNumber number) const { + OUTCOME_TRY(block_hash_opt, getBlockHash(number)); + if (block_hash_opt.has_value()) { + return block_hash_opt.value(); + } + return BlockTreeError::HEADER_NOT_FOUND; + } + BlockTreeImpl::BlocksPruning::BlocksPruning(std::optional keep, primitives::BlockNumber finalized) : keep_{keep}, next_{max(finalized)} {} diff --git a/core/blockchain/impl/block_tree_impl.hpp b/core/blockchain/impl/block_tree_impl.hpp index c7e80960bb..7ad1b9e78e 100644 --- a/core/blockchain/impl/block_tree_impl.hpp +++ b/core/blockchain/impl/block_tree_impl.hpp @@ -18,15 +18,14 @@ #include #include "application/app_configuration.hpp" -#include "blockchain/block_header_repository.hpp" #include "blockchain/block_storage.hpp" #include "blockchain/block_tree_error.hpp" +#include "blockchain/impl/cached_tree.hpp" #include "consensus/babe/types/babe_configuration.hpp" #include "consensus/timeline/types.hpp" #include "crypto/hasher.hpp" #include "log/logger.hpp" #include "metrics/metrics.hpp" -#include "network/extrinsic_observer.hpp" #include "primitives/event_types.hpp" #include "storage/trie/trie_storage.hpp" #include "subscription/extrinsic_event_key_repository.hpp" @@ -40,7 +39,6 @@ namespace kagome { namespace kagome::blockchain { struct ReorgAndPrune; class TreeNode; - class CachedTree; } // namespace kagome::blockchain namespace kagome::common { @@ -59,9 +57,7 @@ namespace kagome::blockchain { /// Create an instance of block tree static outcome::result> create( const application::AppConfiguration &app_config, - std::shared_ptr header_repo, std::shared_ptr storage, - std::shared_ptr extrinsic_observer, std::shared_ptr hasher, primitives::events::ChainSubscriptionEnginePtr chain_events_engine, primitives::events::ExtrinsicSubscriptionEnginePtr @@ -77,7 +73,6 @@ namespace kagome::blockchain { static outcome::result recover( const primitives::BlockId &target_block_id, std::shared_ptr storage, - std::shared_ptr 
header_repo, std::shared_ptr trie_storage, std::shared_ptr block_tree); @@ -93,6 +88,9 @@ namespace kagome::blockchain { outcome::result getBlockHeader( const primitives::BlockHash &block_hash) const override; + outcome::result> tryGetBlockHeader( + const primitives::BlockHash &block_hash) const override; + outcome::result getBlockBody( const primitives::BlockHash &block_hash) const override; @@ -159,6 +157,13 @@ namespace kagome::blockchain { void removeUnfinalized() override; + // BlockHeaderRepository methods + outcome::result getNumberByHash( + const primitives::BlockHash &block_hash) const override; + + outcome::result getHashByNumber( + primitives::BlockNumber block_number) const override; + private: struct BlocksPruning { BlocksPruning(std::optional keep, @@ -171,11 +176,9 @@ namespace kagome::blockchain { }; struct BlockTreeData { - std::shared_ptr header_repo_; std::shared_ptr storage_; std::shared_ptr state_pruner_; std::unique_ptr tree_; - std::shared_ptr extrinsic_observer_; std::shared_ptr hasher_; std::shared_ptr extrinsic_event_key_repo_; @@ -191,10 +194,8 @@ namespace kagome::blockchain { */ BlockTreeImpl( const application::AppConfiguration &app_config, - std::shared_ptr header_repo, std::shared_ptr storage, const primitives::BlockInfo &finalized, - std::shared_ptr extrinsic_observer, std::shared_ptr hasher, primitives::events::ChainSubscriptionEnginePtr chain_events_engine, primitives::events::ExtrinsicSubscriptionEnginePtr diff --git a/core/blockchain/indexer.hpp b/core/blockchain/indexer.hpp index c02566f9f3..428b047c77 100644 --- a/core/blockchain/indexer.hpp +++ b/core/blockchain/indexer.hpp @@ -94,7 +94,7 @@ namespace kagome::blockchain { */ template struct Indexer { - Indexer(std::shared_ptr db, + Indexer(std::shared_ptr db, std::shared_ptr block_tree) : db_{std::move(db)}, block_tree_{std::move(block_tree)} { primitives::BlockInfo genesis{0, block_tree_->getGenesisBlockHash()}; @@ -252,7 +252,7 @@ namespace kagome::blockchain { return 
raw->kv; } - std::shared_ptr db_; + std::shared_ptr db_; std::shared_ptr block_tree_; primitives::BlockInfo last_finalized_indexed_; std::map> map_; diff --git a/core/common/CMakeLists.txt b/core/common/CMakeLists.txt index f167b02b6b..ba231b80bc 100644 --- a/core/common/CMakeLists.txt +++ b/core/common/CMakeLists.txt @@ -9,7 +9,7 @@ target_link_libraries(hexutil Boost::boost outcome) kagome_install(hexutil) add_library(blob blob.hpp blob.cpp) -target_link_libraries(blob hexutil scale::scale) +target_link_libraries(blob hexutil) kagome_install(blob) add_library(fd_limit fd_limit.hpp fd_limit.cpp) diff --git a/core/consensus/babe/has_babe_consensus_digest.hpp b/core/consensus/babe/has_babe_consensus_digest.hpp index 21c8b3e35a..cd8f61ba75 100644 --- a/core/consensus/babe/has_babe_consensus_digest.hpp +++ b/core/consensus/babe/has_babe_consensus_digest.hpp @@ -48,7 +48,7 @@ namespace kagome::consensus::babe { } } - explicit operator bool() const { + operator bool() const { return epoch.has_value(); } diff --git a/core/consensus/babe/impl/babe_config_repository_impl.cpp b/core/consensus/babe/impl/babe_config_repository_impl.cpp index 786753be9f..9d2556e502 100644 --- a/core/consensus/babe/impl/babe_config_repository_impl.cpp +++ b/core/consensus/babe/impl/babe_config_repository_impl.cpp @@ -10,7 +10,6 @@ #include "application/app_state_manager.hpp" #include "babe.hpp" #include "babe_digests_util.hpp" -#include "blockchain/block_header_repository.hpp" #include "blockchain/block_tree.hpp" #include "consensus/consensus_selector.hpp" #include "consensus/timeline/slots_util.hpp" @@ -52,14 +51,13 @@ namespace kagome::consensus::babe { const application::AppConfiguration &app_config, EpochTimings &timings, std::shared_ptr block_tree, - std::shared_ptr header_repo, LazySPtr consensus_selector, std::shared_ptr babe_api, std::shared_ptr trie_storage, primitives::events::ChainSubscriptionEnginePtr chain_events_engine, LazySPtr slots_util) : persistent_storage_( - 
persistent_storage->getSpace(storage::Space::kDefault)), + persistent_storage->getSpace(storage::Space::kDefault)), config_warp_sync_{app_config.syncMethod() == application::SyncMethod::Warp}, timings_(timings), @@ -70,7 +68,6 @@ namespace kagome::consensus::babe { persistent_storage_), block_tree_, }, - header_repo_(std::move(header_repo)), consensus_selector_(consensus_selector), babe_api_(std::move(babe_api)), trie_storage_(std::move(trie_storage)), @@ -79,7 +76,6 @@ namespace kagome::consensus::babe { logger_(log::createLogger("BabeConfigRepo", "babe_config_repo")) { BOOST_ASSERT(persistent_storage_ != nullptr); BOOST_ASSERT(block_tree_ != nullptr); - BOOST_ASSERT(header_repo_ != nullptr); BOOST_ASSERT(babe_api_ != nullptr); SAFE_UNIQUE(indexer_) { diff --git a/core/consensus/babe/impl/babe_config_repository_impl.hpp b/core/consensus/babe/impl/babe_config_repository_impl.hpp index 595678f4e6..95aad199b5 100644 --- a/core/consensus/babe/impl/babe_config_repository_impl.hpp +++ b/core/consensus/babe/impl/babe_config_repository_impl.hpp @@ -25,7 +25,6 @@ namespace kagome::application { namespace kagome::blockchain { class BlockTree; - class BlockHeaderRepository; } // namespace kagome::blockchain namespace kagome::consensus { @@ -80,7 +79,6 @@ namespace kagome::consensus::babe { const application::AppConfiguration &app_config, EpochTimings &timings, std::shared_ptr block_tree, - std::shared_ptr header_repo, LazySPtr consensus_selector, std::shared_ptr babe_api, std::shared_ptr trie_storage, @@ -122,12 +120,11 @@ namespace kagome::consensus::babe { void warp(Indexer &indexer_, const primitives::BlockInfo &block); - std::shared_ptr persistent_storage_; + std::shared_ptr persistent_storage_; bool config_warp_sync_; EpochTimings &timings_; std::shared_ptr block_tree_; mutable SafeObject indexer_; - std::shared_ptr header_repo_; LazySPtr consensus_selector_; std::shared_ptr babe_api_; std::shared_ptr trie_storage_; diff --git 
a/core/consensus/grandpa/has_authority_set_change.hpp b/core/consensus/grandpa/has_authority_set_change.hpp index ac63f149dd..e9e0fcaa6e 100644 --- a/core/consensus/grandpa/has_authority_set_change.hpp +++ b/core/consensus/grandpa/has_authority_set_change.hpp @@ -36,7 +36,7 @@ namespace kagome::consensus::grandpa { } } - explicit operator bool() const { + operator bool() const { return scheduled || forced; } diff --git a/core/consensus/grandpa/impl/authority_manager_impl.hpp b/core/consensus/grandpa/impl/authority_manager_impl.hpp index 0657a223ae..3e6768dd64 100644 --- a/core/consensus/grandpa/impl/authority_manager_impl.hpp +++ b/core/consensus/grandpa/impl/authority_manager_impl.hpp @@ -14,7 +14,6 @@ #include "consensus/grandpa/has_authority_set_change.hpp" #include "log/logger.hpp" #include "primitives/event_types.hpp" -#include "storage/buffer_map_types.hpp" #include "storage/spaced_storage.hpp" namespace kagome::application { @@ -96,7 +95,7 @@ namespace kagome::consensus::grandpa { std::shared_ptr block_tree_; std::shared_ptr grandpa_api_; - std::shared_ptr persistent_storage_; + std::shared_ptr persistent_storage_; primitives::events::ChainSub chain_sub_; mutable blockchain::Indexer indexer_; diff --git a/core/consensus/grandpa/impl/environment_impl.cpp b/core/consensus/grandpa/impl/environment_impl.cpp index f427ef8d93..e9dd9d91eb 100644 --- a/core/consensus/grandpa/impl/environment_impl.cpp +++ b/core/consensus/grandpa/impl/environment_impl.cpp @@ -13,7 +13,6 @@ #include #include "application/app_state_manager.hpp" -#include "blockchain/block_header_repository.hpp" #include "blockchain/block_tree.hpp" #include "common/main_thread_pool.hpp" #include "consensus/grandpa/authority_manager.hpp" @@ -46,7 +45,6 @@ namespace kagome::consensus::grandpa { EnvironmentImpl::EnvironmentImpl( application::AppStateManager &app_state_manager, std::shared_ptr block_tree, - std::shared_ptr header_repository, std::shared_ptr authority_manager, std::shared_ptr transmitter, 
std::shared_ptr approved_ancestor, @@ -61,7 +59,6 @@ namespace kagome::consensus::grandpa { std::shared_ptr offchain_worker_pool, common::MainThreadPool &main_thread_pool) : block_tree_{std::move(block_tree)}, - header_repository_{std::move(header_repository)}, authority_manager_{std::move(authority_manager)}, transmitter_{std::move(transmitter)}, approved_ancestor_(std::move(approved_ancestor)), @@ -77,7 +74,6 @@ namespace kagome::consensus::grandpa { main_pool_handler_{main_thread_pool.handler(app_state_manager)}, logger_{log::createLogger("GrandpaEnvironment", "grandpa")} { BOOST_ASSERT(block_tree_ != nullptr); - BOOST_ASSERT(header_repository_ != nullptr); BOOST_ASSERT(authority_manager_ != nullptr); BOOST_ASSERT(transmitter_ != nullptr); BOOST_ASSERT(grandpa_api_ != nullptr); @@ -214,7 +210,7 @@ namespace kagome::consensus::grandpa { best_block = best_undisputed_block; auto block = best_block; while (block.number > finalized.number) { - OUTCOME_TRY(header, header_repository_->getBlockHeader(block.hash)); + OUTCOME_TRY(header, block_tree_->getBlockHeader(block.hash)); if (HasAuthoritySetChange{header}) { best_block = block; } @@ -225,7 +221,7 @@ namespace kagome::consensus::grandpa { if (voter_set_id.has_value()) { while (best_block.number > finalized.number) { OUTCOME_TRY(header, - header_repository_->getBlockHeader(best_block.hash)); + block_tree_->getBlockHeader(best_block.hash)); auto parent_block = *header.parentInfo(); auto voter_set = authority_manager_->authorities( diff --git a/core/consensus/grandpa/impl/environment_impl.hpp b/core/consensus/grandpa/impl/environment_impl.hpp index 20b2c731d2..2780fc6ddc 100644 --- a/core/consensus/grandpa/impl/environment_impl.hpp +++ b/core/consensus/grandpa/impl/environment_impl.hpp @@ -22,7 +22,6 @@ namespace kagome::application { } // namespace kagome::application namespace kagome::blockchain { - class BlockHeaderRepository; class BlockTree; } // namespace kagome::blockchain @@ -65,7 +64,6 @@ namespace 
kagome::consensus::grandpa { EnvironmentImpl( application::AppStateManager &app_state_manager, std::shared_ptr block_tree, - std::shared_ptr header_repository, std::shared_ptr authority_manager, std::shared_ptr transmitter, std::shared_ptr approved_ancestor, @@ -150,7 +148,6 @@ namespace kagome::consensus::grandpa { private: std::shared_ptr block_tree_; - std::shared_ptr header_repository_; std::shared_ptr authority_manager_; std::shared_ptr transmitter_; std::shared_ptr approved_ancestor_; diff --git a/core/consensus/grandpa/impl/grandpa_impl.cpp b/core/consensus/grandpa/impl/grandpa_impl.cpp index 725d16650d..b45077338f 100644 --- a/core/consensus/grandpa/impl/grandpa_impl.cpp +++ b/core/consensus/grandpa/impl/grandpa_impl.cpp @@ -428,34 +428,34 @@ namespace kagome::consensus::grandpa { // Check if needed to catch-up peer, then do that if (msg.round_number >= current_round_->roundNumber() + kCatchUpThreshold) { - // Do catch-up only when another one is not in progress - if (not pending_catchup_request_.has_value()) { + std::lock_guard _{peer_id_catcup_mutex_}; + // Do catch-up only when another one is not in progress for this peer + if (not pending_catchup_requests_.contains(peer_id)) { environment_->onCatchUpRequested( peer_id, msg.voter_set_id, msg.round_number - 1); - if (pending_catchup_request_.has_value()) { - SL_WARN(logger_, - "Catch up request pending, but another one has done"); - } - pending_catchup_request_.emplace( + pending_catchup_requests_.emplace( peer_id, network::CatchUpRequest{.round_number = msg.round_number - 1, .voter_set_id = msg.voter_set_id}); - catchup_request_timer_handle_ = scheduler_->scheduleWithHandle( - [wp{weak_from_this()}] { - auto self = wp.lock(); - if (not self) { - return; - } - if (self->pending_catchup_request_.has_value()) { - const auto &peer_id = - std::get<0>(self->pending_catchup_request_.value()); - self->reputation_repository_->change( - peer_id, - network::reputation::cost::CATCH_UP_REQUEST_TIMEOUT); - 
self->pending_catchup_request_.reset(); - } - }, - toMilliseconds(kCatchupRequestTimeout)); + catchup_request_timer_handles_.emplace( + peer_id, + scheduler_->scheduleWithHandle( + [wp{weak_from_this()}, peer_id] { + auto self = wp.lock(); + if (not self) { + return; + } + std::lock_guard _{self->peer_id_catcup_mutex_}; + self->catchup_request_timer_handles_.erase(peer_id); + if (auto it = self->pending_catchup_requests_.find(peer_id); + it != self->pending_catchup_requests_.end()) { + self->reputation_repository_->change( + peer_id, + network::reputation::cost::CATCH_UP_REQUEST_TIMEOUT); + self->pending_catchup_requests_.erase(it); + } + }, + toMilliseconds(kCatchupRequestTimeout))); } } return; @@ -609,8 +609,10 @@ namespace kagome::consensus::grandpa { bool need_cleanup_when_exiting_scope = false; + std::lock_guard _{peer_id_catcup_mutex_}; + auto it = pending_catchup_requests_.find(peer_id); if (allow_missing_blocks) { - if (not pending_catchup_request_.has_value()) { + if (it == pending_catchup_requests_.end()) { SL_DEBUG(logger_, "Catch-up request to round #{} received from {}, " "but catch-up request is not pending or timed out", @@ -621,8 +623,7 @@ namespace kagome::consensus::grandpa { return; } - const auto &[remote_peer_id, catchup_request] = - pending_catchup_request_.value(); + const auto &[remote_peer_id, catchup_request] = *it; if (peer_id != remote_peer_id) { SL_DEBUG(logger_, @@ -679,8 +680,8 @@ namespace kagome::consensus::grandpa { ::libp2p::common::FinalAction cleanup([&] { if (need_cleanup_when_exiting_scope) { - catchup_request_timer_handle_.reset(); - pending_catchup_request_.reset(); + catchup_request_timer_handles_.erase(peer_id); + pending_catchup_requests_.erase(peer_id); } }); @@ -1409,7 +1410,7 @@ namespace kagome::consensus::grandpa { auto *index = vote.is() ? &votes.prevote_idx : vote.is() ? 
&votes.precommit_idx : nullptr; - if (index and not *index) { + if (index and not*index) { *index = votes.seen.size(); } } diff --git a/core/consensus/grandpa/impl/grandpa_impl.hpp b/core/consensus/grandpa/impl/grandpa_impl.hpp index a9a6189d3b..d5a96e1e04 100644 --- a/core/consensus/grandpa/impl/grandpa_impl.hpp +++ b/core/consensus/grandpa/impl/grandpa_impl.hpp @@ -275,10 +275,11 @@ namespace kagome::consensus::grandpa { std::shared_ptr scheduler_; std::shared_ptr current_round_; - std::optional< - const std::tuple> - pending_catchup_request_; - libp2p::basic::Scheduler::Handle catchup_request_timer_handle_; + std::mutex peer_id_catcup_mutex_; + std::unordered_map + pending_catchup_requests_; + std::unordered_map + catchup_request_timer_handles_; libp2p::basic::Scheduler::Handle fallback_timer_handle_; std::vector waiting_blocks_; diff --git a/core/consensus/timeline/impl/block_executor_impl.cpp b/core/consensus/timeline/impl/block_executor_impl.cpp index eac6dc16af..3fabd82ee3 100644 --- a/core/consensus/timeline/impl/block_executor_impl.cpp +++ b/core/consensus/timeline/impl/block_executor_impl.cpp @@ -275,7 +275,7 @@ namespace kagome::consensus { .count(), lag_msg); - auto const last_finalized_block = + const auto last_finalized_block = self->block_tree_->getLastFinalized(); self->telemetry_->notifyBlockFinalized(last_finalized_block); auto current_best_block = self->block_tree_->bestBlock(); diff --git a/core/consensus/timeline/types.hpp b/core/consensus/timeline/types.hpp index 4cce3c3203..4ed1f15510 100644 --- a/core/consensus/timeline/types.hpp +++ b/core/consensus/timeline/types.hpp @@ -48,7 +48,7 @@ namespace kagome::consensus { } // Convert to boolean - explicit operator bool() const { + operator bool() const { return count() != 0; } @@ -96,9 +96,8 @@ namespace kagome::consensus { /// Epoch length in slots EpochLength epoch_length{0}; - explicit operator bool() const { - return static_cast(slot_duration) - and static_cast(epoch_length); + operator 
bool() const { + return (bool)slot_duration and (bool) epoch_length; } void init(SlotDuration _slot_duration, EpochLength _epoch_length) { diff --git a/core/dispute_coordinator/impl/dispute_coordinator_impl.cpp b/core/dispute_coordinator/impl/dispute_coordinator_impl.cpp index 3eec26b0cd..bf902625d7 100644 --- a/core/dispute_coordinator/impl/dispute_coordinator_impl.cpp +++ b/core/dispute_coordinator/impl/dispute_coordinator_impl.cpp @@ -17,7 +17,6 @@ #include "application/app_state_manager.hpp" #include "application/chain_spec.hpp" #include "authority_discovery/query/query.hpp" -#include "blockchain/block_header_repository.hpp" #include "common/main_thread_pool.hpp" #include "common/visitor.hpp" #include "consensus/timeline/timeline.hpp" @@ -139,8 +138,6 @@ namespace kagome::dispute { std::shared_ptr session_keys, std::shared_ptr storage, std::shared_ptr sr25519_crypto_provider, - std::shared_ptr - block_header_repository, std::shared_ptr hasher, std::shared_ptr block_tree, std::shared_ptr core_api, @@ -161,7 +158,6 @@ namespace kagome::dispute { session_keys_(std::move(session_keys)), storage_(std::move(storage)), sr25519_crypto_provider_(std::move(sr25519_crypto_provider)), - block_header_repository_(std::move(block_header_repository)), hasher_(std::move(hasher)), block_tree_(std::move(block_tree)), core_api_(std::move(core_api)), @@ -186,7 +182,6 @@ namespace kagome::dispute { BOOST_ASSERT(session_keys_ != nullptr); BOOST_ASSERT(storage_ != nullptr); BOOST_ASSERT(sr25519_crypto_provider_ != nullptr); - BOOST_ASSERT(block_header_repository_ != nullptr); BOOST_ASSERT(hasher_ != nullptr); BOOST_ASSERT(block_tree_ != nullptr); BOOST_ASSERT(core_api_ != nullptr); @@ -478,7 +473,7 @@ namespace kagome::dispute { } participation_ = - std::make_shared(block_header_repository_, + std::make_shared(block_tree_, hasher_, api_, runtime_info_, @@ -2202,7 +2197,7 @@ namespace kagome::dispute { // Update finality lag if possible if (not block_descriptions.empty()) { - if (auto 
number_res = block_header_repository_->getNumberByHash( + if (auto number_res = block_tree_->getNumberByHash( block_descriptions.back().block_hash); number_res.has_value()) { if (number_res.value() > undisputed_chain.number) { diff --git a/core/dispute_coordinator/impl/dispute_coordinator_impl.hpp b/core/dispute_coordinator/impl/dispute_coordinator_impl.hpp index 744a37b3c8..38743185f8 100644 --- a/core/dispute_coordinator/impl/dispute_coordinator_impl.hpp +++ b/core/dispute_coordinator/impl/dispute_coordinator_impl.hpp @@ -47,7 +47,6 @@ namespace kagome::authority_discovery { namespace kagome::blockchain { class BlockTree; - class BlockHeaderRepository; } // namespace kagome::blockchain namespace kagome::common { @@ -116,8 +115,6 @@ namespace kagome::dispute { std::shared_ptr session_keys, std::shared_ptr storage, std::shared_ptr sr25519_crypto_provider, - std::shared_ptr - block_header_repository, std::shared_ptr hasher, std::shared_ptr block_tree, std::shared_ptr core_api, @@ -286,7 +283,6 @@ namespace kagome::dispute { std::shared_ptr session_keys_; std::shared_ptr storage_; std::shared_ptr sr25519_crypto_provider_; - std::shared_ptr block_header_repository_; std::shared_ptr hasher_; std::shared_ptr block_tree_; std::shared_ptr core_api_; diff --git a/core/injector/CMakeLists.txt b/core/injector/CMakeLists.txt index 924c0134fd..810b58d05c 100644 --- a/core/injector/CMakeLists.txt +++ b/core/injector/CMakeLists.txt @@ -85,5 +85,6 @@ target_link_libraries(application_injector vrf_provider waitable_timer wasm_compiler + zstd::libzstd_static ) kagome_clear_objects(application_injector) diff --git a/core/injector/application_injector.cpp b/core/injector/application_injector.cpp index 5cd324eb08..07afea544a 100644 --- a/core/injector/application_injector.cpp +++ b/core/injector/application_injector.cpp @@ -47,10 +47,10 @@ #include "application/app_configuration.hpp" #include "application/impl/app_state_manager_impl.hpp" #include "application/impl/chain_spec_impl.hpp" 
+#include "application/modes/key.hpp" #include "application/modes/precompile_wasm.hpp" #include "application/modes/print_chain_info_mode.hpp" #include "application/modes/recovery_mode.hpp" -#include "application/modes/key.hpp" #include "authority_discovery/publisher/address_publisher.hpp" #include "authority_discovery/query/audi_store_impl.hpp" #include "authority_discovery/query/query_impl.hpp" @@ -58,7 +58,6 @@ #include "authorship/impl/block_builder_impl.hpp" #include "authorship/impl/proposer_impl.hpp" #include "benchmark/block_execution_benchmark.hpp" -#include "blockchain/impl/block_header_repository_impl.hpp" #include "blockchain/impl/block_storage_impl.hpp" #include "blockchain/impl/block_tree_impl.hpp" #include "blockchain/impl/justification_storage_policy.hpp" @@ -195,6 +194,7 @@ #include "runtime/runtime_api/impl/transaction_payment_api.hpp" #include "runtime/wabt/instrument.hpp" #include "runtime/wasm_compiler_definitions.hpp" // this header-file is generated +#include "utils/sptr.hpp" #if KAGOME_WASM_COMPILER_WASM_EDGE == 1 @@ -351,7 +351,7 @@ namespace { } template - sptr get_block_tree(const Injector &injector) { + sptr get_block_tree(const Injector &injector) { auto chain_events_engine = injector .template create(); @@ -359,9 +359,7 @@ namespace { // clang-format off auto block_tree_res = blockchain::BlockTreeImpl::create( injector.template create(), - injector.template create>(), injector.template create>(), - injector.template create>(), injector.template create>(), chain_events_engine, injector.template create(), @@ -376,12 +374,6 @@ namespace { } auto &block_tree = block_tree_res.value(); - auto runtime_upgrade_tracker = - injector.template create>(); - - runtime_upgrade_tracker->subscribeToBlockchainEvents(chain_events_engine, - block_tree); - return block_tree; } @@ -476,20 +468,17 @@ namespace { template std::shared_ptr get_runtime_upgrade_tracker(const Injector &injector) { - auto header_repo = - injector - .template create>(); auto storage 
= injector.template create>(); auto substitutes = injector .template create>(); - auto block_storage = - injector.template create>(); - auto res = - runtime::RuntimeUpgradeTrackerImpl::create(std::move(header_repo), - std::move(storage), - std::move(substitutes), - std::move(block_storage)); + auto block_tree = injector.template create>(); + auto res = runtime::RuntimeUpgradeTrackerImpl::create( + std::move(storage), std::move(substitutes), std::move(block_tree)); + auto chain_events_engine = + injector + .template create(); + res.value()->subscribeToBlockchainEvents(chain_events_engine); return std::shared_ptr( std::move(res.value())); } @@ -750,8 +739,13 @@ namespace { }), di::bind.template to(), bind_by_lambda( - [](const auto &injector) { return get_block_tree(injector); }), - di::bind.template to(), + [](const auto &injector) { + return get_block_tree(injector); + }), + bind_by_lambda( + [](const auto &injector) { + return injector.template create>(); + }), di::bind.template to(), di::bind.template to(), di::bind.template to(), @@ -797,6 +791,32 @@ namespace { di::bind.template to(), di::bind.template to(), di::bind.template to(), + bind_by_lambda([config](const auto &) { + auto support = parachain::SecureModeSupport::none(); + auto log = log::createLogger("Application", "application"); +#ifdef __linux__ + if (not config->disableSecureMode() and config->usePvfSubprocess() + and config->roles().isAuthority()) { + auto res = parachain::runSecureModeCheckProcess(config->runtimeCacheDirPath()); + if (!res) { + SL_ERROR(log, "Secure mode check failed: {}", res.error()); + exit(EXIT_FAILURE); + } + support = res.value(); + if (not support.isTotallySupported()) { + SL_ERROR(log, + "Secure mode is not supported completely. You can disable it " + "using --insecure-validator-i-know-what-i-do."); + exit(EXIT_FAILURE); + } + } +#else + SL_WARN(log, + "Secure validator mode is not implemented for the current " + "platform. 
Proceed at your own risk."); +#endif + return toSptr(support); + }), di::bind.template to(), di::bind.template to(), di::bind.template to(), @@ -877,6 +897,7 @@ namespace { di::bind.template to(), di::bind.template to(), di::bind.template to(), + di::bind.template to(), di::bind.template to(), di::bind.template to(), di::bind.template to(), @@ -1133,12 +1154,6 @@ namespace kagome::injector { return pimpl_->injector_.template create>(); } - std::shared_ptr - KagomeNodeInjector::injectRuntimeUpgradeTracker() { - return pimpl_->injector_ - .template create>(); - } - void KagomeNodeInjector::kademliaRandomWalk() { pimpl_->injector_.create>(); } diff --git a/core/injector/application_injector.hpp b/core/injector/application_injector.hpp index efa9b1fcca..2bf2caad87 100644 --- a/core/injector/application_injector.hpp +++ b/core/injector/application_injector.hpp @@ -12,7 +12,6 @@ #include "clock/clock.hpp" #include "network/dispute_request_observer.hpp" -#include "runtime/runtime_upgrade_tracker.hpp" #include "storage/spaced_storage.hpp" namespace soralog { @@ -158,8 +157,6 @@ namespace kagome::injector { std::shared_ptr injectStorage(); std::shared_ptr injectAddressPublisher(); - std::shared_ptr - injectRuntimeUpgradeTracker(); void kademliaRandomWalk(); std::shared_ptr diff --git a/core/network/impl/peer_manager_impl.cpp b/core/network/impl/peer_manager_impl.cpp index 90757356c8..957185556e 100644 --- a/core/network/impl/peer_manager_impl.cpp +++ b/core/network/impl/peer_manager_impl.cpp @@ -38,6 +38,8 @@ OUTCOME_CPP_DEFINE_CATEGORY(kagome::network, PeerManagerImpl::Error, e) { } namespace kagome::network { + constexpr auto kLibp2pCollectGarbage = std::chrono::seconds{30}; + PeerManagerImpl::PeerManagerImpl( std::shared_ptr app_state_manager, libp2p::Host &host, @@ -276,8 +278,6 @@ namespace kagome::network { void PeerManagerImpl::align() { SL_TRACE(log_, "Try to align peers number"); - const auto hard_limit = app_config_.inPeers() + app_config_.inPeersLight() - + 
app_config_.outPeers(); const auto peer_ttl = app_config_.peeringConfig().peerTtl; align_timer_.reset(); @@ -332,7 +332,7 @@ namespace kagome::network { }); for (; !peers_list.empty() - && (peers_list.size() > hard_limit + && (peers_list.size() > app_config_.maxPeers() || peers_list.back().first == std::numeric_limits::min()); peers_list.pop_back()) { @@ -426,42 +426,38 @@ namespace kagome::network { SL_DEBUG(log_, " address: {}", addr.getStringAddress()); } - host_.connect( - peer_info, - [wp{weak_from_this()}, peer_id](auto res) mutable { - auto self = wp.lock(); - if (not self) { - return; - } + host_.connect(peer_info, [wp{weak_from_this()}, peer_id](auto res) mutable { + auto self = wp.lock(); + if (not self) { + return; + } - if (not res.has_value()) { - SL_DEBUG(self->log_, - "Connecting to peer {} is failed: {}", - peer_id, - res.error()); - self->connecting_peers_.erase(peer_id); - return; - } + if (not res.has_value()) { + SL_DEBUG(self->log_, + "Connecting to peer {} is failed: {}", + peer_id, + res.error()); + self->connecting_peers_.erase(peer_id); + return; + } - auto &connection = res.value(); - auto remote_peer_id_res = connection->remotePeer(); - if (not remote_peer_id_res.has_value()) { - SL_DEBUG( - self->log_, - "Connected, but not identified yet (expecting peer_id={:l})", - peer_id); - self->connecting_peers_.erase(peer_id); - return; - } + auto &connection = res.value(); + auto remote_peer_id_res = connection->remotePeer(); + if (not remote_peer_id_res.has_value()) { + SL_DEBUG(self->log_, + "Connected, but not identified yet (expecting peer_id={:l})", + peer_id); + self->connecting_peers_.erase(peer_id); + return; + } - auto &remote_peer_id = remote_peer_id_res.value(); - if (remote_peer_id == peer_id) { - SL_DEBUG(self->log_, "Connected to peer {}", peer_id); + auto &remote_peer_id = remote_peer_id_res.value(); + if (remote_peer_id == peer_id) { + SL_DEBUG(self->log_, "Connected to peer {}", peer_id); - 
self->processFullyConnectedPeer(peer_id); - } - }, - kTimeoutForConnecting); + self->processFullyConnectedPeer(peer_id); + } + }); } void PeerManagerImpl::disconnectFromPeer(const PeerId &peer_id) { @@ -759,4 +755,17 @@ namespace kagome::network { } return std::nullopt; } + + void PeerManagerImpl::collectGarbage() { + host_.getNetwork().getConnectionManager().collectGarbage(); + host_.getPeerRepository().getAddressRepository().collectGarbage(); + host_.getPeerRepository().getProtocolRepository().collectGarbage(); + + scheduler_->schedule( + [WEAK_SELF] { + WEAK_LOCK(self); + self->collectGarbage(); + }, + kLibp2pCollectGarbage); + } } // namespace kagome::network diff --git a/core/network/impl/peer_manager_impl.hpp b/core/network/impl/peer_manager_impl.hpp index a8e1f803b3..732fb6342f 100644 --- a/core/network/impl/peer_manager_impl.hpp +++ b/core/network/impl/peer_manager_impl.hpp @@ -55,8 +55,6 @@ namespace kagome::network { class PeerManagerImpl : public PeerManager, public std::enable_shared_from_this { public: - static constexpr std::chrono::seconds kTimeoutForConnecting{15}; - enum class Error { UNDECLARED_COLLATOR = 1 }; PeerManagerImpl( @@ -168,6 +166,8 @@ namespace kagome::network { using IsLight = Tagged; size_t countPeers(PeerType in_out, IsLight in_light = false) const; + void collectGarbage(); + log::Logger log_; libp2p::Host &host_; diff --git a/core/network/impl/protocols/parachain.hpp b/core/network/impl/protocols/parachain.hpp index 33944424f0..8b4a112134 100644 --- a/core/network/impl/protocols/parachain.hpp +++ b/core/network/impl/protocols/parachain.hpp @@ -95,7 +95,15 @@ namespace kagome::network { std::shared_ptr observer_; }; - class ValidationProtocol final : public ParachainProtocol { + class ValidationProtocolReserve { + public: + virtual ~ValidationProtocolReserve() = default; + + virtual void reserve(const PeerId &peer_id, bool add) = 0; + }; + + class ValidationProtocol final : public ParachainProtocol, + public 
ValidationProtocolReserve { public: ValidationProtocol(ParachainProtocolInject inject, std::shared_ptr observer); @@ -122,7 +130,7 @@ namespace kagome::network { } } void write(const BitfieldDistribution &message); - void reserve(const PeerId &peer_id, bool add); + void reserve(const PeerId &peer_id, bool add) override; private: std::shared_ptr observer_; diff --git a/core/network/impl/protocols/protocol_fetch_chunk_obsolete.hpp b/core/network/impl/protocols/protocol_fetch_chunk_obsolete.hpp index 087e665d6e..5ee803e4dd 100644 --- a/core/network/impl/protocols/protocol_fetch_chunk_obsolete.hpp +++ b/core/network/impl/protocols/protocol_fetch_chunk_obsolete.hpp @@ -16,7 +16,6 @@ #include "blockchain/genesis_block_hash.hpp" #include "log/logger.hpp" #include "network/common.hpp" -#include "network/helpers/scale_message_read_writer.hpp" #include "network/impl/protocols/request_response_protocol.hpp" #include "parachain/validator/parachain_processor.hpp" #include "utils/non_copyable.hpp" diff --git a/core/network/impl/protocols/request_response_protocol.hpp b/core/network/impl/protocols/request_response_protocol.hpp index 8f72dd8635..340add7849 100644 --- a/core/network/impl/protocols/request_response_protocol.hpp +++ b/core/network/impl/protocols/request_response_protocol.hpp @@ -160,8 +160,7 @@ namespace kagome::network { self->protocolName(), peer_id); cb(std::move(stream)); - }, - timeout_); + }); } template diff --git a/core/network/impl/router_libp2p.cpp b/core/network/impl/router_libp2p.cpp index c79fd4d2f2..7ca309191c 100644 --- a/core/network/impl/router_libp2p.cpp +++ b/core/network/impl/router_libp2p.cpp @@ -8,7 +8,6 @@ #include #include - #include "common/main_thread_pool.hpp" #include "network/impl/protocols/beefy_justification_protocol.hpp" #include "network/impl/protocols/beefy_protocol_impl.hpp" @@ -180,7 +179,7 @@ namespace kagome::network { auto &addr_repo = host_.getPeerRepository().getAddressRepository(); // here we put our known public addresses to 
the repository auto upsert_res = addr_repo.upsertAddresses( - own_info_.id, own_info_.addresses, std::chrono::years(100)); + own_info_.id, own_info_.addresses, libp2p::peer::ttl::kPermanent); if (!upsert_res) { log_->error("Cannot add own addresses to repo: {}", upsert_res.error()); } diff --git a/core/network/impl/state_sync_request_flow.cpp b/core/network/impl/state_sync_request_flow.cpp index b76d99682a..1a62d750c2 100644 --- a/core/network/impl/state_sync_request_flow.cpp +++ b/core/network/impl/state_sync_request_flow.cpp @@ -14,22 +14,6 @@ #include "storage/trie/trie_storage_backend.hpp" namespace kagome::network { - - outcome::result> - StateSyncRequestFlow::create( - std::shared_ptr node_db, - const primitives::BlockInfo &block_info, - const primitives::BlockHeader &block) { - std::unique_ptr flow{ - new StateSyncRequestFlow(node_db, block_info, block)}; - OUTCOME_TRY(done, flow->isKnown(block.state_root)); - flow->done_ = done; - if (not done) { - flow->levels_.emplace_back(Level{.branch_hash = block.state_root}); - } - return flow; - } - StateSyncRequestFlow::StateSyncRequestFlow( std::shared_ptr node_db, const primitives::BlockInfo &block_info, @@ -37,7 +21,12 @@ namespace kagome::network { : node_db_{std::move(node_db)}, block_info_{block_info}, block_{block}, - log_{log::createLogger("StateSync")} {} + done_(isKnown(block.state_root)), + log_{log::createLogger("StateSync")} { + if (not done_) { + levels_.emplace_back(Level{.branch_hash = block.state_root}); + } + } StateRequest StateSyncRequestFlow::nextRequest() const { BOOST_ASSERT(not complete()); @@ -67,8 +56,7 @@ namespace kagome::network { storage::trie::PolkadotCodec codec; BOOST_ASSERT(not complete()); BOOST_OUTCOME_TRY(auto nodes, storage::trie::compactDecode(res.proof)); - auto diff_count = nodes.size(); - auto diff_size = res.proof.size(); + auto diff_count = nodes.size(), diff_size = res.proof.size(); if (diff_count != 0) { stat_count_ += diff_count; stat_size_ += diff_size; @@ -88,7 +76,7 
@@ namespace kagome::network { // when trie node is contained in other node value BOOST_OUTCOME_TRY(node, codec.decodeNode(raw)); } - OUTCOME_TRY(level.push({ + level.push({ .node = node, .branch = std::nullopt, .child = level.child, @@ -97,7 +85,7 @@ namespace kagome::network { .hash = it->first, .encoded = std::move(raw), }, - })); + }); nodes.erase(it); return outcome::success(); }; @@ -111,30 +99,22 @@ namespace kagome::network { auto pop_level = true; while (not level.stack.empty()) { auto child = level.value_child; - OUTCOME_TRY(known, isKnown(*child)); - if (child and not known) { + if (child and not isKnown(*child)) { auto &level = levels_.emplace_back(); level.branch_hash = child; pop_level = false; break; } - if (level.value_hash) { - OUTCOME_TRY(known_value, isKnown(*level.value_hash)); - if (not known_value) { - auto it = nodes.find(*level.value_hash); - if (it == nodes.end()) { - return outcome::success(); - } - OUTCOME_TRY( - node_db_->values().put(it->first, std::move(it->second.first))); - known_.emplace(it->first); + if (level.value_hash and not isKnown(*level.value_hash)) { + auto it = nodes.find(*level.value_hash); + if (it == nodes.end()) { + return outcome::success(); } + OUTCOME_TRY(node_db_->put(it->first, std::move(it->second.first))); + known_.emplace(it->first); } - OUTCOME_TRY(level.branchInit()); - while (not level.branch_end) { - OUTCOME_TRY(known, isKnown(*level.branch_hash)); - if (not level.branch_hash or known) { - OUTCOME_TRY(level.branchNext()); + for (level.branchInit(); not level.branch_end; level.branchNext()) { + if (not level.branch_hash or isKnown(*level.branch_hash)) { continue; } auto it = nodes.find(*level.branch_hash); @@ -146,11 +126,11 @@ namespace kagome::network { } if (level.branch_end) { auto &t = level.stack.back().t; - OUTCOME_TRY(node_db_->nodes().put(t.hash, std::move(t.encoded))); + OUTCOME_TRY(node_db_->put(t.hash, std::move(t.encoded))); known_.emplace(t.hash); - OUTCOME_TRY(level.pop()); + level.pop(); if 
(not level.stack.empty()) { - OUTCOME_TRY(level.branchNext()); + level.branchNext(); } } } @@ -162,17 +142,16 @@ namespace kagome::network { return outcome::success(); } - outcome::result StateSyncRequestFlow::isKnown( - const common::Hash256 &hash) { + bool StateSyncRequestFlow::isKnown(const common::Hash256 &hash) { if (hash == storage::trie::kEmptyRootHash) { return true; } if (known_.find(hash) != known_.end()) { return true; } - OUTCOME_TRY(known_node, node_db_->nodes().contains(hash)); - OUTCOME_TRY(known_value, node_db_->values().contains(hash)); - if (known_node || known_value) { + if (auto node_res = node_db_->contains(hash), + value_res = node_db_->contains(hash); + (node_res and node_res.value()) or (value_res and value_res.value())) { known_.emplace(hash); return true; } diff --git a/core/network/impl/state_sync_request_flow.hpp b/core/network/impl/state_sync_request_flow.hpp index b0b1a69948..2c7cc0a2fc 100644 --- a/core/network/impl/state_sync_request_flow.hpp +++ b/core/network/impl/state_sync_request_flow.hpp @@ -33,7 +33,7 @@ namespace kagome::network { using Level = storage::trie::RawCursor; - static outcome::result> create( + StateSyncRequestFlow( std::shared_ptr node_db, const primitives::BlockInfo &block_info, const primitives::BlockHeader &block); @@ -55,12 +55,7 @@ namespace kagome::network { outcome::result onResponse(const StateResponse &res); private: - StateSyncRequestFlow( - std::shared_ptr node_db, - const primitives::BlockInfo &block_info, - const primitives::BlockHeader &block); - - outcome::result isKnown(const common::Hash256 &hash); + bool isKnown(const common::Hash256 &hash); std::shared_ptr node_db_; diff --git a/core/network/impl/synchronizer_impl.cpp b/core/network/impl/synchronizer_impl.cpp index 5ab880162c..b7fe69878b 100644 --- a/core/network/impl/synchronizer_impl.cpp +++ b/core/network/impl/synchronizer_impl.cpp @@ -19,11 +19,11 @@ #include "consensus/grandpa/environment.hpp" #include 
"consensus/grandpa/has_authority_set_change.hpp" #include "consensus/timeline/timeline.hpp" -#include "network/impl/state_sync_request_flow.hpp" #include "network/peer_manager.hpp" #include "network/protocols/state_protocol.hpp" #include "network/protocols/sync_protocol.hpp" #include "network/types/block_attributes.hpp" +#include "network/warp/protocol.hpp" #include "primitives/common.hpp" #include "storage/predefined_keys.hpp" #include "storage/trie/serialization/trie_serializer.hpp" @@ -822,13 +822,7 @@ namespace kagome::network { return; } if (not state_sync_flow_ or state_sync_flow_->blockInfo() != block) { - auto flow_res = - StateSyncRequestFlow::create(trie_node_db_, block, header); - if (not flow_res) { - handler(flow_res.error()); - return; - } - state_sync_flow_.emplace(std::move(*flow_res.value())); + state_sync_flow_.emplace(trie_node_db_, block, header); } state_sync_.emplace(StateSync{ .peer = peer_id, @@ -1333,20 +1327,18 @@ namespace kagome::network { bool SynchronizerImpl::fetchJustificationRange(primitives::BlockNumber min, FetchJustificationRangeCb cb) { - BlocksRequest request{ - .fields = BlockAttribute::JUSTIFICATION, - .from = min, - .direction = Direction::ASCENDING, - .max = std::nullopt, - .multiple_justifications = false, - }; - auto chosen = chooseJustificationPeer(min, request.fingerprint()); + auto hash_res = block_tree_->getHashByNumber(min); + if (not hash_res) { + return false; + } + auto &hash = hash_res.value(); + auto chosen = chooseJustificationPeer(min, min); if (not chosen) { return false; } busy_peers_.emplace(*chosen); auto cb2 = [weak{weak_from_this()}, min, cb{std::move(cb)}, peer{*chosen}]( - outcome::result r) mutable { + outcome::result r) mutable { auto self = weak.lock(); if (not self) { return; @@ -1355,30 +1347,23 @@ namespace kagome::network { if (not r) { return cb(r.error()); } - auto &blocks = r.value().blocks; - if (blocks.empty()) { - return cb(Error::EMPTY_RESPONSE); - } - auto number = min; - for (auto 
&block : blocks) { - if (block.justification) { - self->grandpa_environment_->applyJustification( - {number, block.hash}, - *block.justification, - [cb{std::move(cb)}](outcome::result r) { - if (not r) { - cb(r.error()); - } else { - cb(std::nullopt); - } - }); - return; - } - ++number; + auto &blocks = r.value().proofs; + for (const auto &block : blocks) { + self->grandpa_environment_->applyJustification( + block.justification.block_info, + {scale::encode(block.justification).value()}, + [cb{std::move(cb)}](outcome::result r) { + if (not r) { + cb(r.error()); + } else { + cb(std::nullopt); + } + }); + return; } - cb(min + blocks.size()); + cb(min); }; - fetch(*chosen, std::move(request), "justification range", std::move(cb2)); + router_->getWarpProtocol()->doRequest(*chosen, hash, std::move(cb2)); return true; } diff --git a/core/network/notifications/protocol.cpp b/core/network/notifications/protocol.cpp index 0a58cfcd62..29f37efc17 100644 --- a/core/network/notifications/protocol.cpp +++ b/core/network/notifications/protocol.cpp @@ -385,6 +385,9 @@ namespace kagome::network::notifications { } for (auto &conn : host_->getNetwork().getConnectionManager().getConnections()) { + if (conn->isClosed()) { + continue; + } auto peer_id = conn->remotePeer().value(); if (reserved_.contains(peer_id)) { continue; @@ -402,7 +405,7 @@ namespace kagome::network::notifications { size_t Protocol::peerCount(bool out) { size_t count = 0; - if (out) { + if (not out) { for (auto &p : peers_in_) { if (reserved_.contains(p.first)) { continue; diff --git a/core/parachain/availability/recovery/recovery_impl.cpp b/core/parachain/availability/recovery/recovery_impl.cpp index 3f8004a43b..f58af6dc84 100644 --- a/core/parachain/availability/recovery/recovery_impl.cpp +++ b/core/parachain/availability/recovery/recovery_impl.cpp @@ -9,6 +9,7 @@ #include "application/chain_spec.hpp" #include "authority_discovery/query/query.hpp" #include "blockchain/block_tree.hpp" +#include 
"crypto/key_store/session_keys.hpp" #include "log/formatters/optional.hpp" #include "network/impl/protocols/protocol_fetch_available_data.hpp" #include "network/impl/protocols/protocol_fetch_chunk.hpp" @@ -47,6 +48,7 @@ namespace { namespace kagome::parachain { constexpr size_t kParallelRequests = 50; + constexpr size_t kMaxSizeOfDataToRecoverFromBackers = 1 << 20; // 1Mb RecoveryImpl::RecoveryImpl( std::shared_ptr chain_spec, @@ -56,7 +58,8 @@ namespace kagome::parachain { std::shared_ptr av_store, std::shared_ptr query_audi, std::shared_ptr router, - std::shared_ptr pm) + std::shared_ptr pm, + std::shared_ptr session_keys) : logger_{log::createLogger("Recovery", "parachain")}, hasher_{std::move(hasher)}, block_tree_{std::move(block_tree)}, @@ -64,7 +67,8 @@ namespace kagome::parachain { av_store_{std::move(av_store)}, query_audi_{std::move(query_audi)}, router_{std::move(router)}, - pm_{std::move(pm)} { + pm_{std::move(pm)}, + session_keys_{std::move(session_keys)} { // Register metrics metrics_registry_->registerCounterFamily( fullRecoveriesStartedMetricName, "Total number of started recoveries"); @@ -148,16 +152,18 @@ namespace kagome::parachain { } } + auto val2chunk = [n_validators{session->validators.size()}, + start_pos](ValidatorIndex validator_index) -> ChunkIndex { + return (start_pos + validator_index) % n_validators; + }; + Active active; active.erasure_encoding_root = receipt.descriptor.erasure_encoding_root; active.chunks_total = session->validators.size(); active.chunks_required = _min.value(); active.cb.emplace_back(std::move(cb)); active.discovery_keys = session->discovery_keys; - active.val2chunk = [n_validators{active.chunks_total}, start_pos]( - ValidatorIndex validator_index) -> ChunkIndex { - return (start_pos + validator_index) % n_validators; - }; + active.val2chunk = val2chunk; if (backing_group.has_value()) { const auto group = backing_group.value(); @@ -184,7 +190,35 @@ namespace kagome::parachain { active_.emplace(candidate_hash, 
std::move(active)); lock.unlock(); - full_from_bakers_recovery_prepare(candidate_hash); + + std::optional available_data_size; + + if (auto indexed_key_pair_opt = + session_keys_->getParaKeyPair(session->validators); + indexed_key_pair_opt.has_value()) { + auto out_validator_index = indexed_key_pair_opt->second; + auto index_of_our_chunk = val2chunk(out_validator_index); + auto min_chunks = _min.value(); + + auto our_chunk = av_store_->getChunk(candidate_hash, index_of_our_chunk); + + if (not our_chunk.has_value()) { + SL_WARN(logger_, + "Our node does not have a chunk which it must be have"); + } else { + available_data_size = our_chunk->chunk.size() * min_chunks; + } + } else { + SL_WARN(logger_, "Cannot retrieve out validator index"); + } + + // Do recovery from backers strategy iff available + // date size can be calculated and less than limit + if (available_data_size < kMaxSizeOfDataToRecoverFromBackers) { + return full_from_bakers_recovery_prepare(candidate_hash); + } + + systematic_chunks_recovery_prepare(candidate_hash); } void RecoveryImpl::full_from_bakers_recovery_prepare( @@ -335,7 +369,7 @@ namespace kagome::parachain { } active.order.emplace_back(validator_index); } - std::shuffle(active.order.begin(), active.order.end(), random_); + std::ranges::shuffle(active.order, random_); active.queried.clear(); active.chunks_active = 0; diff --git a/core/parachain/availability/recovery/recovery_impl.hpp b/core/parachain/availability/recovery/recovery_impl.hpp index 8c12d379a2..94ded4cc3b 100644 --- a/core/parachain/availability/recovery/recovery_impl.hpp +++ b/core/parachain/availability/recovery/recovery_impl.hpp @@ -30,7 +30,8 @@ namespace kagome::blockchain { namespace kagome::crypto { class Hasher; -} + class SessionKeys; +} // namespace kagome::crypto namespace kagome::network { class PeerManager; @@ -56,7 +57,8 @@ namespace kagome::parachain { std::shared_ptr av_store, std::shared_ptr query_audi, std::shared_ptr router, - std::shared_ptr pm); + 
std::shared_ptr pm, + std::shared_ptr session_keys); void recover(const HashedCandidateReceipt &hashed_receipt, SessionIndex session_index, @@ -134,6 +136,7 @@ namespace kagome::parachain { std::shared_ptr query_audi_; std::shared_ptr router_; std::shared_ptr pm_; + std::shared_ptr session_keys_; std::mutex mutex_; std::default_random_engine random_; diff --git a/core/parachain/pvf/clone.hpp b/core/parachain/pvf/clone.hpp new file mode 100644 index 0000000000..c6352acf85 --- /dev/null +++ b/core/parachain/pvf/clone.hpp @@ -0,0 +1,132 @@ +/** + * Copyright Quadrivium LLC + * All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include +#include +#include + +#ifdef __linux__ +#include +#include +#endif + +#include "parachain/pvf/pvf_worker_types.hpp" + +namespace kagome::parachain::clone { + constexpr size_t kCloneStackSize = 2 << 20; + + enum class CloneError : uint8_t { + kCallbackFailed, + }; + Q_ENUM_ERROR_CODE(CloneError) { + using E = decltype(e); + switch (e) { + case E::kCallbackFailed: + return "Callback failed"; + } + abort(); + } + +#ifdef __linux__ + // https://github.com/paritytech/polkadot-sdk/blob/f4a196ab1473856c9c5992239fcc2f14c2c42914/polkadot/node/core/pvf/common/src/worker/security/clone.rs#L35-L54 + /// Try to run clone(2) on the current worker. + /// + /// SAFETY: new process should be either spawned within a single threaded + /// process, or use only async-signal-safe functions. 
+ template + inline outcome::result clone(bool have_unshare_newuser, const Cb &cb) { + Buffer stack(kCloneStackSize); + // https://github.com/paritytech/polkadot-sdk/blob/f4a196ab1473856c9c5992239fcc2f14c2c42914/polkadot/node/core/pvf/common/src/worker/security/clone.rs#L75-L93 + int flags = CLONE_NEWCGROUP | CLONE_NEWIPC | CLONE_NEWNET | CLONE_NEWNS + | CLONE_NEWPID | CLONE_NEWUTS | SIGCHLD; + if (not have_unshare_newuser) { + flags |= CLONE_NEWUSER; + } + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg) + pid_t pid = ::clone( + [](void *arg) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast) + auto &cb = *reinterpret_cast(arg); + return cb() ? EXIT_SUCCESS : EXIT_FAILURE; + }, + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + stack.data() + stack.size(), + flags, + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + const_cast(static_cast(&cb))); + if (pid == -1) { + return std::errc{errno}; + } + return pid; + } +#endif + + inline outcome::result wait(pid_t pid) { + int status = 0; + if (waitpid(pid, &status, 0) == -1) { + return std::errc{errno}; + } + if (not WIFEXITED(status) or WEXITSTATUS(status) != EXIT_SUCCESS) { + return CloneError::kCallbackFailed; + } + return outcome::success(); + } + + // https://github.com/paritytech/polkadot-sdk/blob/f4a196ab1473856c9c5992239fcc2f14c2c42914/polkadot/node/core/pvf/execute-worker/src/lib.rs#L245-L293 + /// Call callback either directly, or inside `clone`, or inside `fork`. 
+ inline outcome::result cloneOrFork(const log::Logger &log, + const PvfWorkerInputConfig &config, + const auto &cb) { + auto cb_log = [&] { + auto r = cb(); + if (not r) { + SL_WARN(log, "cloneOrFork cb returned error: {}", r.error()); + return false; + } + return true; + }; + if (config.force_disable_secure_mode) { + if (not cb_log()) { + return CloneError::kCallbackFailed; + } + return outcome::success(); + } + std::optional pid; +#ifdef __linux__ + if (config.secure_mode_support.can_do_secure_clone) { + BOOST_OUTCOME_TRY(pid, clone(config.secure_mode_support.chroot, cb_log)); + } +#endif + if (not pid) { + pid = fork(); + if (pid == -1) { + return std::errc{errno}; + } + if (pid == 0) { + _Exit(cb_log() ? EXIT_SUCCESS : EXIT_FAILURE); + } + } + return wait(*pid); + } + + // https://github.com/paritytech/polkadot-sdk/blob/f4a196ab1473856c9c5992239fcc2f14c2c42914/polkadot/node/core/pvf/common/src/worker/security/clone.rs#L56-L63 + /// Runs a check for clone(2) with all sandboxing flags and returns an error + /// indicating whether it can be fully enabled on the current Linux + /// environment. + /// + /// SAFETY: new process should be either spawned within a single threaded + /// process, or use only async-signal-safe functions. 
+ inline outcome::result check() { +#ifdef __linux__ + OUTCOME_TRY(pid, clone(false, [] { return true; })); + return wait(pid); +#else + return std::errc::not_supported; +#endif + } +} // namespace kagome::parachain::clone diff --git a/core/parachain/pvf/kagome_pvf_worker.cpp b/core/parachain/pvf/kagome_pvf_worker.cpp index 9ad170c465..49e04d11cd 100644 --- a/core/parachain/pvf/kagome_pvf_worker.cpp +++ b/core/parachain/pvf/kagome_pvf_worker.cpp @@ -5,7 +5,6 @@ */ #include -#include #include #include #include @@ -23,6 +22,7 @@ #include #include +#include #include #include #include @@ -34,6 +34,7 @@ #include "common/bytestr.hpp" #include "log/configurator.hpp" #include "log/logger.hpp" +#include "parachain/pvf/clone.hpp" #include "parachain/pvf/kagome_pvf_worker.hpp" #include "parachain/pvf/kagome_pvf_worker_injector.hpp" #include "parachain/pvf/pvf_worker_types.hpp" @@ -62,6 +63,8 @@ } namespace kagome::parachain { + using unix = boost::asio::local::stream_protocol; + namespace { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) static kagome::log::Logger logger; @@ -229,26 +232,21 @@ namespace kagome::parachain { } #endif - outcome::result readStdin(std::span out) { - std::cin.read( - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast) - reinterpret_cast(out.data()), - // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions) - out.size()); - if (not std::cin.good()) { - return std::errc::io_error; - } - return outcome::success(); - } - template - outcome::result decodeInput() { + outcome::result decodeInput(unix::socket &socket) { // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,hicpp-member-init) std::array length_bytes; - OUTCOME_TRY(readStdin(length_bytes)); + boost::system::error_code ec; + boost::asio::read(socket, boost::asio::buffer(length_bytes), ec); + if (ec) { + return ec; + } OUTCOME_TRY(message_length, scale::decode(length_bytes)); std::vector packed_message(message_length, 0); - 
OUTCOME_TRY(readStdin(packed_message)); + boost::asio::read(socket, boost::asio::buffer(packed_message), ec); + if (ec) { + return ec; + } return scale::decode(packed_message); } @@ -282,8 +280,16 @@ namespace kagome::parachain { } } - outcome::result pvf_worker_main_outcome() { - OUTCOME_TRY(input_config, decodeInput()); + outcome::result pvf_worker_main_outcome( + const std::string &unix_socket_path) { + boost::asio::io_context io_context; + unix::socket socket{io_context}; + boost::system::error_code ec; + socket.connect(unix_socket_path, ec); + if (ec) { + return ec; + } + OUTCOME_TRY(input_config, decodeInput(socket)); kagome::log::tuneLoggingSystem(input_config.log_params); SL_VERBOSE(logger, "Cache directory: {}", input_config.cache_dir); @@ -347,7 +353,7 @@ namespace kagome::parachain { OUTCOME_TRY(factory, createModuleFactory(injector, input_config.engine)); std::shared_ptr module; while (true) { - OUTCOME_TRY(input, decodeInput()); + OUTCOME_TRY(input, decodeInput(socket)); if (auto *code_params = std::get_if(&input)) { auto &path = code_params->path; @@ -361,26 +367,27 @@ namespace kagome::parachain { SL_ERROR(logger, "PvfWorkerInputCodeParams expected"); return std::errc::invalid_argument; } - OUTCOME_TRY(instance, module->instantiate()); - - OUTCOME_TRY(ctx, runtime::RuntimeContextFactory::stateless(instance)); - OUTCOME_TRY( - result, - instance->callExportFunction(ctx, "validate_block", input_args)); - OUTCOME_TRY(instance->resetEnvironment()); - OUTCOME_TRY(len, scale::encode(result.size())); - - std::cout.write( - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast) - reinterpret_cast(len.data()), - // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions) - len.size()); - std::cout.write( - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast) - reinterpret_cast(result.data()), - // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions) - result.size()); - std::cout.flush(); + auto forked = [&]() -> outcome::result { + 
OUTCOME_TRY(instance, module->instantiate()); + + OUTCOME_TRY(ctx, runtime::RuntimeContextFactory::stateless(instance)); + OUTCOME_TRY( + result, + instance->callExportFunction(ctx, "validate_block", input_args)); + OUTCOME_TRY(instance->resetEnvironment()); + OUTCOME_TRY(len, scale::encode(result.size())); + + boost::asio::write(socket, boost::asio::buffer(len), ec); + if (ec) { + return ec; + } + boost::asio::write(socket, boost::asio::buffer(result), ec); + if (ec) { + return ec; + } + return outcome::success(); + }; + OUTCOME_TRY(clone::cloneOrFork(logger, input_config, forked)); } } @@ -399,14 +406,12 @@ namespace kagome::parachain { } kagome::log::setLoggingSystem(logging_system); logger = kagome::log::createLogger("PVF Worker", "parachain"); - - if (!checkEnvVarsEmpty(env)) { - logger->error( - "PVF worker processes must not have any environment variables."); + if (argc < 2) { + SL_ERROR(logger, "missing unix socket path arg"); return EXIT_FAILURE; } - - if (auto r = pvf_worker_main_outcome(); not r) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + if (auto r = pvf_worker_main_outcome(argv[1]); not r) { SL_ERROR(logger, "PVF worker process failed: {}", r.error()); return EXIT_FAILURE; } diff --git a/core/parachain/pvf/kagome_pvf_worker_injector.hpp b/core/parachain/pvf/kagome_pvf_worker_injector.hpp index 9893aa1336..0f10ab2929 100644 --- a/core/parachain/pvf/kagome_pvf_worker_injector.hpp +++ b/core/parachain/pvf/kagome_pvf_worker_injector.hpp @@ -15,11 +15,14 @@ #include "crypto/ed25519/ed25519_provider_impl.hpp" #include "crypto/elliptic_curves/elliptic_curves_impl.hpp" #include "crypto/hasher/hasher_impl.hpp" +#include "crypto/key_store.hpp" #include "crypto/pbkdf2/impl/pbkdf2_provider_impl.hpp" #include "crypto/secp256k1/secp256k1_provider_impl.hpp" #include "crypto/sr25519/sr25519_provider_impl.hpp" #include "host_api/impl/host_api_factory_impl.hpp" #include "injector/bind_by_lambda.hpp" +#include 
"offchain/offchain_persistent_storage.hpp" +#include "offchain/offchain_worker_pool.hpp" #include "parachain/pvf/pvf_worker_types.hpp" #include "runtime/binaryen/instance_environment_factory.hpp" #include "runtime/binaryen/module/module_factory_impl.hpp" @@ -27,6 +30,7 @@ #include "runtime/common/runtime_properties_cache_impl.hpp" #include "runtime/memory_provider.hpp" #include "runtime/module.hpp" +#include "runtime/runtime_instances_pool.hpp" #include "runtime/wasm_compiler_definitions.hpp" // this header-file is generated #include "storage/trie/serialization/trie_serializer_impl.hpp" #include "storage/trie/trie_storage.hpp" diff --git a/core/parachain/pvf/pvf_worker_types.hpp b/core/parachain/pvf/pvf_worker_types.hpp index dad1bd6189..e4b9b37250 100644 --- a/core/parachain/pvf/pvf_worker_types.hpp +++ b/core/parachain/pvf/pvf_worker_types.hpp @@ -9,6 +9,7 @@ #include #include "common/buffer.hpp" +#include "parachain/pvf/secure_mode_precheck.hpp" #include "runtime/runtime_context.hpp" #include "scale/scale.hpp" #include "scale/std_variant.hpp" @@ -31,12 +32,13 @@ namespace kagome::parachain { const application::AppConfiguration &app_config); struct PvfWorkerInputConfig { - SCALE_TIE(4); + SCALE_TIE(5); RuntimeEngine engine; std::string cache_dir; std::vector log_params; bool force_disable_secure_mode; + SecureModeSupport secure_mode_support; }; struct PvfWorkerInputCodeParams { diff --git a/core/parachain/pvf/secure_mode.hpp b/core/parachain/pvf/secure_mode.hpp index 617370360f..3100b1d2ee 100644 --- a/core/parachain/pvf/secure_mode.hpp +++ b/core/parachain/pvf/secure_mode.hpp @@ -22,6 +22,11 @@ namespace kagome::parachain { std::string message_; }; + + inline auto make_exception_ptr(SecureModeError e) { + return std::make_exception_ptr(std::move(e)); + } + template using SecureModeOutcome = CustomOutcome; diff --git a/core/parachain/pvf/secure_mode_precheck.cpp b/core/parachain/pvf/secure_mode_precheck.cpp index a673bc3cfc..d60a70f9af 100644 --- 
a/core/parachain/pvf/secure_mode_precheck.cpp +++ b/core/parachain/pvf/secure_mode_precheck.cpp @@ -23,6 +23,7 @@ #include "common/buffer_view.hpp" #include "log/configurator.hpp" #include "log/logger.hpp" +#include "parachain/pvf/clone.hpp" #include "parachain/pvf/secure_mode.hpp" #include "utils/get_exe_path.hpp" @@ -36,8 +37,17 @@ namespace kagome::parachain { std::filesystem::path cache_dir = original_cache_dir; SecureModeSupport support = SecureModeSupport::none(); auto logger = log::createLogger("CheckSecureMode", "parachain"); + if (auto res = clone::check()) { + support.can_do_secure_clone = true; + } else { + SL_WARN(logger, + "Secure mode incomplete, cannot enable clone for PVF " + "worker: {}", + res.error()); + } if (auto res = changeRoot(cache_dir)) { support.chroot = true; + cache_dir = "/"; } else { SL_WARN( logger, @@ -46,7 +56,6 @@ namespace kagome::parachain { cache_dir.c_str(), res.error()); } - cache_dir = "/"; if (auto res = enableLandlock(cache_dir)) { support.landlock = true; @@ -70,8 +79,8 @@ namespace kagome::parachain { } SecureModeOutcome runSecureModeCheckProcess( - boost::asio::io_context &io_context, const std::filesystem::path &cache_dir) { + boost::asio::io_context io_context; namespace process_v2 = boost::process::v2; boost::asio::readable_pipe pipe{io_context}; // input passed as CLI arguments to enable users to manually run the check diff --git a/core/parachain/pvf/secure_mode_precheck.hpp b/core/parachain/pvf/secure_mode_precheck.hpp index 9d4f5d0dc3..6802d462e8 100644 --- a/core/parachain/pvf/secure_mode_precheck.hpp +++ b/core/parachain/pvf/secure_mode_precheck.hpp @@ -6,7 +6,6 @@ #pragma once -#include #include #include "parachain/pvf/secure_mode.hpp" @@ -20,7 +19,7 @@ namespace kagome::parachain { * platform */ struct SecureModeSupport { - SCALE_TIE(3); + SCALE_TIE(4); // The filesystem root of the PVF process can be set to the worker directory bool chroot; @@ -32,8 +31,16 @@ namespace kagome::parachain { // process bool 
seccomp; + // Whether we are able to call `clone` with all sandboxing flags. + bool can_do_secure_clone; + static SecureModeSupport none() { - return {false, false, false}; + return { + .chroot = false, + .landlock = false, + .seccomp = false, + .can_do_secure_clone = false, + }; } bool isTotallySupported() const { @@ -51,7 +58,6 @@ namespace kagome::parachain { * Spawns a child process that executes checkSecureMode */ SecureModeOutcome runSecureModeCheckProcess( - boost::asio::io_context &io_context, const std::filesystem::path &cache_dir); /** diff --git a/core/parachain/pvf/workers.cpp b/core/parachain/pvf/workers.cpp index 5551676a6d..0d0fc7fa8b 100644 --- a/core/parachain/pvf/workers.cpp +++ b/core/parachain/pvf/workers.cpp @@ -6,8 +6,7 @@ #include "parachain/pvf/workers.hpp" -#include -#include +#include #include #include #include @@ -15,26 +14,19 @@ #include "application/app_configuration.hpp" #include "common/main_thread_pool.hpp" +#include "filesystem/common.hpp" #include "parachain/pvf/pvf_worker_types.hpp" #include "utils/get_exe_path.hpp" #include "utils/weak_macro.hpp" namespace kagome::parachain { - constexpr auto kMetricQueueSize = "kagome_pvf_queue_size"; + using unix = boost::asio::local::stream_protocol; - struct AsyncPipe : boost::process::async_pipe { - using async_pipe::async_pipe; - using lowest_layer_type = AsyncPipe; - }; + constexpr auto kMetricQueueSize = "kagome_pvf_queue_size"; struct ProcessAndPipes : std::enable_shared_from_this { - AsyncPipe pipe_stdin; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) - AsyncPipe &writer; - AsyncPipe pipe_stdout; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) - AsyncPipe &reader; boost::process::child process; + std::optional socket; std::shared_ptr writing = std::make_shared(); std::shared_ptr reading = std::make_shared(); @@ -44,23 +36,18 @@ namespace kagome::parachain { ProcessAndPipes(boost::asio::io_context &io_context, const std::string &exe, + const 
std::string &unix_socket_path, const Config &config) - : pipe_stdin{io_context}, - writer{pipe_stdin}, - pipe_stdout{io_context}, - reader{pipe_stdout}, - process{ - exe, - boost::process::args({"pvf-worker"}), - boost::process::env(boost::process::environment()), + : process{ + exe, + boost::process::args({"pvf-worker", unix_socket_path}), + boost::process::env(boost::process::environment()), // LSAN doesn't work in secure mode #ifdef KAGOME_WITH_ASAN - boost::process::env["ASAN_OPTIONS"] = - config.disable_lsan ? "detect_leaks=0" : "", + boost::process::env["ASAN_OPTIONS"] = + config.disable_lsan ? "detect_leaks=0" : "", #endif - boost::process::std_out > pipe_stdout, - boost::process::std_in < pipe_stdin, - } { + } { } void write(Buffer data, auto cb) { @@ -68,7 +55,7 @@ namespace kagome::parachain { scale::encode(data.size()).value()); *writing = std::move(data); boost::asio::async_write( - writer, + *socket, libp2p::asioBuffer(*len), [WEAK_SELF, cb, len](boost::system::error_code ec, size_t) mutable { WEAK_LOCK(self); @@ -76,7 +63,7 @@ namespace kagome::parachain { return cb(ec); } boost::asio::async_write( - self->writer, + *self->socket, libp2p::asioBuffer(*self->writing), [weak_self, cb](boost::system::error_code ec, size_t) mutable { WEAK_LOCK(self); @@ -95,7 +82,7 @@ namespace kagome::parachain { void read(auto cb) { auto len = std::make_shared>(); boost::asio::async_read( - reader, + *socket, libp2p::asioBuffer(*len), [WEAK_SELF, cb{std::move(cb)}, len](boost::system::error_code ec, size_t) mutable { @@ -109,7 +96,7 @@ namespace kagome::parachain { } self->reading->resize(len_res.value()); boost::asio::async_read( - self->reader, + *self->socket, libp2p::asioBuffer(*self->reading), [cb{std::move(cb)}, reading{self->reading}]( boost::system::error_code ec, size_t) mutable { @@ -124,6 +111,7 @@ namespace kagome::parachain { PvfWorkers::PvfWorkers(const application::AppConfiguration &app_config, common::MainThreadPool &main_thread_pool, + SecureModeSupport 
secure_mode_support, std::shared_ptr scheduler) : io_context_{main_thread_pool.io_context()}, main_pool_handler_{main_thread_pool.handlerStarted()}, @@ -135,6 +123,7 @@ namespace kagome::parachain { .cache_dir = app_config.runtimeCacheDirPath(), .log_params = app_config.log(), .force_disable_secure_mode = app_config.disableSecureMode(), + .secure_mode_support = secure_mode_support, } { metrics_registry_->registerGaugeFamily(kMetricQueueSize, "pvf queue size"); std::unordered_map kind_name{ @@ -162,20 +151,46 @@ namespace kagome::parachain { #if defined(__linux__) && defined(KAGOME_WITH_ASAN) config.disable_lsan = !worker_config_.force_disable_secure_mode; #endif - auto process = - std::make_shared(*io_context_, exe_, config); - process->writeScale( - worker_config_, - [WEAK_SELF, job{std::move(job)}, used{std::move(used)}, process]( - outcome::result r) mutable { - WEAK_LOCK(self); - if (not r) { - return job.cb(r.error()); - } - self->writeCode(std::move(job), - {.process = std::move(process)}, - std::move(used)); - }); + auto unix_socket_path = filesystem::unique_path( + std::filesystem::path{worker_config_.cache_dir} + / "unix_socket.%%%%%%"); + std::error_code ec; + std::filesystem::remove(unix_socket_path, ec); + if (ec) { + return job.cb(ec); + } + auto acceptor = std::make_shared( + *io_context_, unix_socket_path.native()); + auto process = std::make_shared( + *io_context_, exe_, unix_socket_path, config); + acceptor->async_accept([WEAK_SELF, + job{std::move(job)}, + used, + unix_socket_path, + acceptor, + process{std::move(process)}]( + boost::system::error_code ec, + unix::socket &&socket) mutable { + std::error_code ec2; + std::filesystem::remove(unix_socket_path, ec2); + WEAK_LOCK(self); + if (ec) { + return job.cb(ec); + } + process->socket = std::move(socket); + process->writeScale( + self->worker_config_, + [weak_self, job{std::move(job)}, used{std::move(used)}, process]( + outcome::result r) mutable { + WEAK_LOCK(self); + if (not r) { + return 
job.cb(r.error()); + } + self->writeCode(std::move(job), + {.process = std::move(process)}, + std::move(used)); + }); + }); return; } findFree(std::move(job)); diff --git a/core/parachain/pvf/workers.hpp b/core/parachain/pvf/workers.hpp index b9dcbde5ad..d7d4ecd837 100644 --- a/core/parachain/pvf/workers.hpp +++ b/core/parachain/pvf/workers.hpp @@ -43,6 +43,7 @@ namespace kagome::parachain { public: PvfWorkers(const application::AppConfiguration &app_config, common::MainThreadPool &main_thread_pool, + SecureModeSupport secure_mode_support, std::shared_ptr scheduler); using Cb = std::function)>; diff --git a/core/parachain/validator/impl/candidates.hpp b/core/parachain/validator/impl/candidates.hpp index 001fc89adb..bc4e177023 100644 --- a/core/parachain/validator/impl/candidates.hpp +++ b/core/parachain/validator/impl/candidates.hpp @@ -262,11 +262,13 @@ namespace kagome::parachain { boost::variant; struct Candidates { - std::unordered_map candidates; - std::unordered_map< - Hash, - std::unordered_map>> - by_parent; + using ByParaId = + std::unordered_map>; + using ByRelayParent = std::unordered_map; + using StateByCandidate = std::unordered_map; + + StateByCandidate candidates; + ByRelayParent by_parent; log::Logger logger = log::createLogger("Candidates", "parachain"); std::vector frontier_hypotheticals( diff --git a/core/parachain/validator/impl/parachain_processor.cpp b/core/parachain/validator/impl/parachain_processor.cpp index 66fe599a92..f5f7080c96 100644 --- a/core/parachain/validator/impl/parachain_processor.cpp +++ b/core/parachain/validator/impl/parachain_processor.cpp @@ -462,8 +462,9 @@ namespace kagome::parachain { if (!validator) { SL_TRACE(logger_, "Not a parachain validator, or no para keys."); + } else { + is_parachain_validator = true; } - is_parachain_validator = true; if (!session_info) { return Error::NO_SESSION_INFO; @@ -1421,15 +1422,11 @@ namespace kagome::parachain { ParachainProcessorImpl::get_block_number_under_construction( const 
RelayHash &relay_parent) const { BOOST_ASSERT(main_pool_handler_->isInCurrentThread()); - - auto res_header = block_tree_->getBlockHeader(relay_parent); - if (res_header.has_error()) { - if (res_header.error() == blockchain::BlockTreeError::HEADER_NOT_FOUND) { - return 0; - } - return res_header.error(); + OUTCOME_TRY(header, block_tree_->tryGetBlockHeader(relay_parent)); + if (not header) { + return 0; } - return res_header.value().number + 1; + return header.value().number + 1; } bool ParachainProcessorImpl::bitfields_indicate_availability( diff --git a/core/parachain/validator/prospective_parachains/prospective_parachains.cpp b/core/parachain/validator/prospective_parachains/prospective_parachains.cpp index 3b5c44b50f..e181011806 100644 --- a/core/parachain/validator/prospective_parachains/prospective_parachains.cpp +++ b/core/parachain/validator/prospective_parachains/prospective_parachains.cpp @@ -172,19 +172,17 @@ namespace kagome::parachain { ProspectiveParachains::fetchBlockInfo(const RelayHash &relay_hash) { /// TODO(iceseer): do https://github.com/qdrvm/kagome/issues/1888 /// cache for block header request and calculations - auto res_header = block_tree_->getBlockHeader(relay_hash); - if (res_header.has_error()) { - if (res_header.error() == blockchain::BlockTreeError::HEADER_NOT_FOUND) { - return outcome::success(std::nullopt); - } - return res_header.error(); - } + OUTCOME_TRY(header_opt, block_tree_->tryGetBlockHeader(relay_hash)); + if (not header_opt) { + return outcome::success(std::nullopt); + } + const auto &header = header_opt.value(); return fragment::BlockInfoProspectiveParachains{ .hash = relay_hash, - .parent_hash = res_header.value().parent_hash, - .number = res_header.value().number, - .storage_root = res_header.value().state_root, + .parent_hash = header.parent_hash, + .number = header.number, + .storage_root = header.state_root, }; } diff --git a/core/parachain/validator/statement_distribution/statement_distribution.cpp 
b/core/parachain/validator/statement_distribution/statement_distribution.cpp index ba31755c2c..416bf1fbe7 100644 --- a/core/parachain/validator/statement_distribution/statement_distribution.cpp +++ b/core/parachain/validator/statement_distribution/statement_distribution.cpp @@ -1073,12 +1073,12 @@ namespace kagome::parachain::statement_distribution { parachain_state->get().disabled_bitmask(*group); const network::vstaging::AttestedCandidateResponse &response = r.value(); SL_DEBUG(logger, - "Fetch attested candidate success. (relay parent={}, " - "candidate={}, group index={}, statements={})", - relay_parent, - candidate_hash, - group_index, - response.statements.size()); + "Fetch attested candidate success. (relay parent={}, " + "candidate={}, group index={}, statements={})", + relay_parent, + candidate_hash, + group_index, + response.statements.size()); if (not validate(parachain_state->get(), candidate_hash, response)) { SL_WARN(logger, diff --git a/core/runtime/common/module_repository_impl.cpp b/core/runtime/common/module_repository_impl.cpp index 6d80480502..c4bbb4eb89 100644 --- a/core/runtime/common/module_repository_impl.cpp +++ b/core/runtime/common/module_repository_impl.cpp @@ -74,7 +74,7 @@ namespace kagome::runtime { KAGOME_PROFILE_START(module_retrieval) Item item; - auto res = SAFE_UNIQUE(cache_)->outcome::result { + OUTCOME_TRY(SAFE_UNIQUE(cache_)->outcome::result { if (auto r = cache_.get(state)) { item = r->get(); } else { @@ -93,8 +93,7 @@ namespace kagome::runtime { cache_.put(state, item); } return outcome::success(); - }; - OUTCOME_TRY(res); + }); return item; } } // namespace kagome::runtime diff --git a/core/runtime/common/runtime_upgrade_tracker_impl.cpp b/core/runtime/common/runtime_upgrade_tracker_impl.cpp index 02d13a97a1..f5f4dbb727 100644 --- a/core/runtime/common/runtime_upgrade_tracker_impl.cpp +++ b/core/runtime/common/runtime_upgrade_tracker_impl.cpp @@ -8,8 +8,6 @@ #include -#include "blockchain/block_header_repository.hpp" 
-#include "blockchain/block_storage.hpp" #include "blockchain/block_tree.hpp" #include "log/profiling_logger.hpp" #include "log/trace_macros.hpp" @@ -19,15 +17,13 @@ namespace kagome::runtime { outcome::result> RuntimeUpgradeTrackerImpl::create( - std::shared_ptr header_repo, std::shared_ptr storage, std::shared_ptr code_substitutes, - std::shared_ptr block_storage) { - BOOST_ASSERT(header_repo); + std::shared_ptr block_tree) { BOOST_ASSERT(storage); BOOST_ASSERT(code_substitutes); - BOOST_ASSERT(block_storage); + BOOST_ASSERT(block_tree); OUTCOME_TRY(encoded_opt, storage->getSpace(storage::Space::kDefault) @@ -41,25 +37,22 @@ namespace kagome::runtime { saved_data = std::move(decoded); } return std::unique_ptr{ - new RuntimeUpgradeTrackerImpl(std::move(header_repo), - std::move(storage), + new RuntimeUpgradeTrackerImpl(std::move(storage), std::move(code_substitutes), std::move(saved_data), - std::move(block_storage))}; + std::move(block_tree))}; } RuntimeUpgradeTrackerImpl::RuntimeUpgradeTrackerImpl( - std::shared_ptr header_repo, std::shared_ptr storage, std::shared_ptr code_substitutes, std::vector &&saved_data, - std::shared_ptr block_storage) + std::shared_ptr block_tree) : runtime_upgrades_{std::move(saved_data)}, - header_repo_{std::move(header_repo)}, storage_{storage->getSpace(storage::Space::kDefault)}, known_code_substitutes_{std::move(code_substitutes)}, - block_storage_{std::move(block_storage)}, + block_tree_{std::move(block_tree)}, logger_{log::createLogger("StorageCodeProvider", "runtime")} {} bool RuntimeUpgradeTrackerImpl::hasCodeSubstitute( @@ -72,23 +65,15 @@ namespace kagome::runtime { const primitives::BlockInfo &chain_end) const { // if the found state is finalized, it is guaranteed to not belong to a // different fork - primitives::BlockInfo last_finalized; - auto block_tree = block_tree_.lock(); - if (block_tree) { - last_finalized = block_tree->getLastFinalized(); // less expensive - } else { - OUTCOME_TRY(block_info, 
block_storage_->getLastFinalized()); - last_finalized = block_info; - } + primitives::BlockInfo last_finalized = block_tree_->getLastFinalized(); if (last_finalized.number >= state.number) { return true; } // a non-finalized state may belong to a different fork, need to check // explicitly (can be expensive if blocks are far apart) KAGOME_PROFILE_START(has_direct_chain) - BOOST_ASSERT(block_tree); bool has_direct_chain = - block_tree->hasDirectChain(state.hash, chain_end.hash); + block_tree_->hasDirectChain(state.hash, chain_end.hash); KAGOME_PROFILE_END(has_direct_chain) return has_direct_chain; } @@ -145,7 +130,7 @@ namespace kagome::runtime { if (latest_upgrade == runtime_upgrades_.begin()) { // if we have no info on updates before this block, we just return its // state - OUTCOME_TRY(block_header, header_repo_->getBlockHeader(block.hash)); + OUTCOME_TRY(block_header, block_tree_->getBlockHeader(block.hash)); SL_DEBUG( logger_, "Pick runtime state at block {} for the same block", block); return block_header.state_root; @@ -166,7 +151,7 @@ namespace kagome::runtime { } // if this is an orphan block for some reason, just return its state_root // (there is no other choice) - OUTCOME_TRY(block_header, header_repo_->getBlockHeader(block.hash)); + OUTCOME_TRY(block_header, block_tree_->getBlockHeader(block.hash)); logger_->warn("Block {}, a child of block {} is orphan", block, primitives::BlockInfo(block_header.number - 1, @@ -188,11 +173,7 @@ namespace kagome::runtime { void RuntimeUpgradeTrackerImpl::subscribeToBlockchainEvents( std::shared_ptr - chain_sub_engine, - std::shared_ptr block_tree) { - BOOST_ASSERT(block_tree != nullptr); - block_tree_ = block_tree; - + chain_sub_engine) { chain_subscription_ = primitives::events::subscribe( chain_sub_engine, primitives::events::ChainEventType::kNewRuntime, @@ -203,9 +184,9 @@ namespace kagome::runtime { .get(); auto res = push(block_hash); if (res.has_value() and res.value().second) { - auto header_res = 
header_repo_->getBlockHeader(block_hash); + auto header_res = block_tree_->getBlockHeader(block_hash); if (header_res.has_value()) { - auto &header = header_res.value(); + const auto &header = header_res.value(); primitives::BlockInfo block_info{header.number, block_hash}; SL_INFO(logger_, "Runtime upgrade at block {}", block_info); } @@ -215,7 +196,7 @@ namespace kagome::runtime { outcome::result> RuntimeUpgradeTrackerImpl::push(const primitives::BlockHash &hash) { - OUTCOME_TRY(header, header_repo_->getBlockHeader(hash)); + OUTCOME_TRY(header, block_tree_->getBlockHeader(hash)); primitives::BlockInfo block_info{header.number, hash}; bool is_new_upgrade = diff --git a/core/runtime/common/runtime_upgrade_tracker_impl.hpp b/core/runtime/common/runtime_upgrade_tracker_impl.hpp index c1c0a2d14a..557e0bc96f 100644 --- a/core/runtime/common/runtime_upgrade_tracker_impl.hpp +++ b/core/runtime/common/runtime_upgrade_tracker_impl.hpp @@ -19,9 +19,7 @@ #include "storage/trie/types.hpp" namespace kagome::blockchain { - class BlockHeaderRepository; class BlockTree; - class BlockStorage; } // namespace kagome::blockchain namespace kagome::runtime { @@ -33,11 +31,10 @@ namespace kagome::runtime { * which may fail, thus construction only from a factory method */ static outcome::result> create( - std::shared_ptr header_repo, std::shared_ptr storage, std::shared_ptr code_substitutes, - std::shared_ptr block_storage); + std::shared_ptr block_tree); struct RuntimeUpgradeData { SCALE_TIE(2); @@ -55,8 +52,7 @@ namespace kagome::runtime { void subscribeToBlockchainEvents( std::shared_ptr - chain_sub_engine, - std::shared_ptr block_tree); + chain_sub_engine); outcome::result getLastCodeUpdateState( const primitives::BlockInfo &block) override; @@ -66,12 +62,11 @@ namespace kagome::runtime { private: RuntimeUpgradeTrackerImpl( - std::shared_ptr header_repo, std::shared_ptr storage, std::shared_ptr code_substitutes, std::vector &&saved_data, - std::shared_ptr block_storage); + 
std::shared_ptr block_tree); outcome::result isStateInChain( const primitives::BlockInfo &state, @@ -103,12 +98,10 @@ namespace kagome::runtime { std::shared_ptr chain_subscription_; - std::weak_ptr block_tree_; - std::shared_ptr header_repo_; std::shared_ptr storage_; std::shared_ptr known_code_substitutes_; - std::shared_ptr block_storage_; + std::shared_ptr block_tree_; log::Logger logger_; }; diff --git a/core/runtime/common/trie_storage_provider_impl.cpp b/core/runtime/common/trie_storage_provider_impl.cpp index 93f70d3169..7546f5324a 100644 --- a/core/runtime/common/trie_storage_provider_impl.cpp +++ b/core/runtime/common/trie_storage_provider_impl.cpp @@ -158,7 +158,7 @@ namespace kagome::runtime { // TODO(turuslan): #2067, clone batch or implement delta_trie_root auto child_apply = [&](BufferView child, - storage::trie::TrieBatch &map) -> outcome::result { + storage::BufferStorage &map) -> outcome::result { for (auto &transaction : transaction_stack_) { auto it = transaction.child_batches.find(child); if (it == transaction.child_batches.end()) { diff --git a/core/storage/CMakeLists.txt b/core/storage/CMakeLists.txt index eb6ef065e7..2c69d8bb5b 100644 --- a/core/storage/CMakeLists.txt +++ b/core/storage/CMakeLists.txt @@ -12,12 +12,14 @@ add_library(storage rocksdb/rocksdb_spaces.cpp database_error.cpp changes_trie/impl/storage_changes_tracker_impl.cpp + in_memory/in_memory_storage.cpp trie/child_prefix.cpp trie/compact_decode.cpp trie/compact_encode.cpp trie/impl/trie_batch_base.cpp trie/impl/ephemeral_trie_batch_impl.cpp trie/impl/trie_storage_impl.cpp + trie/impl/trie_storage_backend_batch.cpp trie/impl/trie_storage_backend_impl.cpp trie/impl/persistent_trie_batch_impl.cpp trie/impl/topper_trie_batch_impl.cpp @@ -29,7 +31,6 @@ add_library(storage trie/serialization/trie_serializer_impl.cpp trie/serialization/polkadot_codec.cpp trie_pruner/impl/trie_pruner_impl.cpp - migrations/migrations.cpp ) target_link_libraries(storage blob diff --git 
a/core/storage/buffer_map_types.hpp b/core/storage/buffer_map_types.hpp index 8c3904d1e5..488739feb5 100644 --- a/core/storage/buffer_map_types.hpp +++ b/core/storage/buffer_map_types.hpp @@ -13,9 +13,9 @@ #include "common/buffer.hpp" #include "common/buffer_or_view.hpp" +#include "storage/face/batch_writeable.hpp" #include "storage/face/generic_maps.hpp" #include "storage/face/write_batch.hpp" -#include "storage/spaces.hpp" namespace kagome::storage::face { template <> @@ -36,10 +36,7 @@ namespace kagome::storage { using BufferBatch = face::WriteBatch; - using BufferSpacedBatch = face::SpacedBatch; - using BufferStorage = face::GenericStorage; - using BufferBatchableStorage = face::BatchableStorage; using BufferStorageCursor = face::MapCursor; } // namespace kagome::storage diff --git a/core/storage/face/batch_writeable.hpp b/core/storage/face/batch_writeable.hpp index c8546452a5..6077980e53 100644 --- a/core/storage/face/batch_writeable.hpp +++ b/core/storage/face/batch_writeable.hpp @@ -9,6 +9,7 @@ #include #include "storage/face/write_batch.hpp" +#include "storage/face/writeable.hpp" namespace kagome::storage::face { @@ -26,7 +27,9 @@ namespace kagome::storage::face { * @brief Creates new Write Batch - an object, which can be used to * efficiently write bulk data. */ - virtual std::unique_ptr> batch() = 0; + virtual std::unique_ptr> batch() { + throw std::logic_error{"BatchWriteable::batch not implemented"}; + } }; } // namespace kagome::storage::face diff --git a/core/storage/face/generic_maps.hpp b/core/storage/face/generic_maps.hpp index fdd1480881..8787d141b0 100644 --- a/core/storage/face/generic_maps.hpp +++ b/core/storage/face/generic_maps.hpp @@ -12,15 +12,16 @@ #include "storage/face/writeable.hpp" namespace kagome::storage::face { - /** - * @brief An abstraction over a readable, writeable, iterable, batchable - * key-value map. + * @brief An abstraction over a readable, writeable, iterable key-value map. 
* @tparam K key type * @tparam V value type */ template - struct GenericStorage : Readable, Iterable, Writeable { + struct GenericStorage : Readable, + Iterable, + Writeable, + BatchWriteable { /** * Reports RAM state size * @return size in bytes @@ -30,7 +31,4 @@ namespace kagome::storage::face { } }; - template - struct BatchableStorage : GenericStorage, BatchWriteable {}; - } // namespace kagome::storage::face diff --git a/core/storage/face/write_batch.hpp b/core/storage/face/write_batch.hpp index 102fe46a4b..55b6bf49c6 100644 --- a/core/storage/face/write_batch.hpp +++ b/core/storage/face/write_batch.hpp @@ -29,43 +29,4 @@ namespace kagome::storage::face { virtual void clear() = 0; }; - /** - * @brief An abstraction over a spaced storage, which can be used for batch - * writes - * @tparam K key type - * @tparam V value type - */ - template - struct SpacedBatch { - virtual ~SpacedBatch() = default; - - /** - * @brief Store value by key - * @param key key - * @param value value - * @return result containing void if put successful, error otherwise - */ - virtual outcome::result put(Space space, - const View &key, - OwnedOrView &&value) = 0; - - /** - * @brief Remove value by key - * @param key K - * @return error code if error happened - */ - virtual outcome::result remove(Space space, const View &key) = 0; - - /** - * @brief Writes batch. - * @return error code in case of error. - */ - virtual outcome::result commit() = 0; - - /** - * @brief Clear batch. 
- */ - virtual void clear() = 0; - }; - } // namespace kagome::storage::face diff --git a/test/testutil/storage/in_memory/cursor.hpp b/core/storage/in_memory/cursor.hpp similarity index 96% rename from test/testutil/storage/in_memory/cursor.hpp rename to core/storage/in_memory/cursor.hpp index 6f35979cd2..52b7b79ae9 100644 --- a/test/testutil/storage/in_memory/cursor.hpp +++ b/core/storage/in_memory/cursor.hpp @@ -7,7 +7,7 @@ #pragma once #include "common/buffer.hpp" -#include "testutil/storage/in_memory/in_memory_storage.hpp" +#include "storage/in_memory/in_memory_storage.hpp" namespace kagome::storage { class InMemoryCursor : public BufferStorageCursor { diff --git a/core/storage/in_memory/in_memory_batch.hpp b/core/storage/in_memory/in_memory_batch.hpp new file mode 100644 index 0000000000..f73e2c42c6 --- /dev/null +++ b/core/storage/in_memory/in_memory_batch.hpp @@ -0,0 +1,47 @@ +/** + * Copyright Quadrivium LLC + * All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include "common/buffer.hpp" +#include "storage/in_memory/in_memory_storage.hpp" + +namespace kagome::storage { + using kagome::common::Buffer; + + class InMemoryBatch : public BufferBatch { + public: + explicit InMemoryBatch(InMemoryStorage &db) : db{db} {} + + outcome::result put(const BufferView &key, + BufferOrView &&value) override { + entries[key.toHex()] = std::move(value).intoBuffer(); + return outcome::success(); + } + + outcome::result remove(const BufferView &key) override { + entries.erase(key.toHex()); + return outcome::success(); + } + + outcome::result commit() override { + for (auto &entry : entries) { + OUTCOME_TRY(db.put(Buffer::fromHex(entry.first).value(), + BufferView{entry.second})); + } + return outcome::success(); + } + + void clear() override { + entries.clear(); + } + + private: + std::map entries; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + InMemoryStorage &db; + }; +} // namespace kagome::storage diff --git 
a/test/testutil/storage/in_memory/in_memory_spaced_storage.hpp b/core/storage/in_memory/in_memory_spaced_storage.hpp similarity index 76% rename from test/testutil/storage/in_memory/in_memory_spaced_storage.hpp rename to core/storage/in_memory/in_memory_spaced_storage.hpp index d5c0c43cba..0a257c6866 100644 --- a/test/testutil/storage/in_memory/in_memory_spaced_storage.hpp +++ b/core/storage/in_memory/in_memory_spaced_storage.hpp @@ -14,8 +14,8 @@ #include "in_memory_storage.hpp" #include "outcome/outcome.hpp" #include "storage/buffer_map_types.hpp" +#include "storage/in_memory/in_memory_storage.hpp" #include "storage/spaced_storage.hpp" -#include "testutil/storage/in_memory/in_memory_storage.hpp" namespace kagome::storage { @@ -24,9 +24,9 @@ namespace kagome::storage { * Mostly needed to have an in-memory trie in tests to avoid integration with * an actual persistent database */ - class InMemorySpacedStorage final : public SpacedStorage { + class InMemorySpacedStorage : public storage::SpacedStorage { public: - std::shared_ptr getSpace(Space space) override { + std::shared_ptr getSpace(Space space) override { auto it = spaces.find(space); if (it != spaces.end()) { return it->second; @@ -35,8 +35,6 @@ namespace kagome::storage { .first->second; } - std::unique_ptr createBatch() override; - private: std::map> spaces; }; diff --git a/test/testutil/storage/in_memory/in_memory_storage.cpp b/core/storage/in_memory/in_memory_storage.cpp similarity index 86% rename from test/testutil/storage/in_memory/in_memory_storage.cpp rename to core/storage/in_memory/in_memory_storage.cpp index 3dd0d6bd1e..e1862a3a65 100644 --- a/test/testutil/storage/in_memory/in_memory_storage.cpp +++ b/core/storage/in_memory/in_memory_storage.cpp @@ -4,11 +4,11 @@ * SPDX-License-Identifier: Apache-2.0 */ -#include "testutil/storage/in_memory/in_memory_storage.hpp" +#include "storage/in_memory/in_memory_storage.hpp" #include "storage/database_error.hpp" -#include 
"testutil/storage/in_memory/cursor.hpp" -#include "testutil/storage/in_memory/in_memory_batch.hpp" +#include "storage/in_memory/cursor.hpp" +#include "storage/in_memory/in_memory_batch.hpp" using kagome::common::Buffer; @@ -69,8 +69,4 @@ namespace kagome::storage { std::optional InMemoryStorage::byteSizeHint() const { return size_; } - - std::unique_ptr InMemorySpacedStorage::createBatch() { - return std::make_unique(*this); - } } // namespace kagome::storage diff --git a/test/testutil/storage/in_memory/in_memory_storage.hpp b/core/storage/in_memory/in_memory_storage.hpp similarity index 90% rename from test/testutil/storage/in_memory/in_memory_storage.hpp rename to core/storage/in_memory/in_memory_storage.hpp index dddec04384..dc777c1c59 100644 --- a/test/testutil/storage/in_memory/in_memory_storage.hpp +++ b/core/storage/in_memory/in_memory_storage.hpp @@ -11,8 +11,6 @@ #include "common/buffer.hpp" #include "outcome/outcome.hpp" #include "storage/buffer_map_types.hpp" -#include "storage/face/batch_writeable.hpp" -#include "storage/face/write_batch.hpp" namespace kagome::storage { @@ -21,7 +19,7 @@ namespace kagome::storage { * Mostly needed to have an in-memory trie in tests to avoid integration with * an actual persistent database */ - class InMemoryStorage : public BufferBatchableStorage { + class InMemoryStorage : public storage::BufferStorage { public: ~InMemoryStorage() override = default; diff --git a/core/storage/map_prefix/prefix.cpp b/core/storage/map_prefix/prefix.cpp index ce9a045ac1..8ad19f3280 100644 --- a/core/storage/map_prefix/prefix.cpp +++ b/core/storage/map_prefix/prefix.cpp @@ -7,7 +7,6 @@ #include "storage/map_prefix/prefix.hpp" #include -#include "storage/buffer_map_types.hpp" namespace kagome::storage { inline std::optional afterPrefix(Buffer key) { @@ -100,8 +99,7 @@ namespace kagome::storage { batch->clear(); } - MapPrefix::MapPrefix(BufferView prefix, - std::shared_ptr map) + MapPrefix::MapPrefix(BufferView prefix, std::shared_ptr map) : 
prefix{prefix}, after_prefix{afterPrefix(this->prefix)}, map{std::move(map)} {} diff --git a/core/storage/map_prefix/prefix.hpp b/core/storage/map_prefix/prefix.hpp index cd055be9d4..48d42b5b5e 100644 --- a/core/storage/map_prefix/prefix.hpp +++ b/core/storage/map_prefix/prefix.hpp @@ -7,14 +7,13 @@ #pragma once #include "storage/buffer_map_types.hpp" -#include "storage/face/batch_writeable.hpp" namespace kagome::storage { /** * Map wrapper to use keys under prefix. * Cursor removes key prefix and can seeks first/last. */ - struct MapPrefix : BufferBatchableStorage { + struct MapPrefix : BufferStorage { struct Cursor : BufferStorageCursor { Cursor(MapPrefix &map, std::unique_ptr cursor); @@ -46,7 +45,7 @@ namespace kagome::storage { std::unique_ptr batch; }; - MapPrefix(BufferView prefix, std::shared_ptr map); + MapPrefix(BufferView prefix, std::shared_ptr map); Buffer _key(BufferView key) const; outcome::result contains(const BufferView &key) const override; @@ -61,6 +60,6 @@ namespace kagome::storage { Buffer prefix; std::optional after_prefix; - std::shared_ptr map; + std::shared_ptr map; }; } // namespace kagome::storage diff --git a/core/storage/migrations/migrations.cpp b/core/storage/migrations/migrations.cpp deleted file mode 100644 index 9b3a2e7184..0000000000 --- a/core/storage/migrations/migrations.cpp +++ /dev/null @@ -1,164 +0,0 @@ -#include "storage/migrations/migrations.hpp" - -#include -#include - -#include -#include - -#include "blockchain/block_tree.hpp" -#include "blockchain/block_tree_error.hpp" -#include "common/blob.hpp" -#include "injector/application_injector.hpp" -#include "log/logger.hpp" -#include "primitives/common.hpp" -#include "runtime/runtime_upgrade_tracker.hpp" -#include "storage/database_error.hpp" -#include "storage/spaced_storage.hpp" -#include "storage/trie/trie_batches.hpp" -#include "storage/trie/trie_storage.hpp" -#include "storage/trie/trie_storage_backend.hpp" - -namespace kagome::storage::migrations { - - outcome::result 
migrateTree(SpacedStorage &storage, - trie::TrieBatch &trie_batch, - log::Logger &logger) { - auto batch = storage.createBatch(); - auto cursor = trie_batch.trieCursor(); - OUTCOME_TRY(cursor->next()); - - auto nodes = storage.getSpace(Space::kTrieNode); - auto values = storage.getSpace(Space::kTrieValue); - size_t migrated_values = 0; - size_t total_values = 0; - size_t small_values = 0; - while (cursor->isValid()) { - auto value_hash = cursor->valueHash(); - BOOST_ASSERT(value_hash.has_value()); // because cursor isValid - if (!value_hash->small) { - OUTCOME_TRY(present_in_values, values->contains(value_hash->hash)); - if (!present_in_values) { - OUTCOME_TRY(value, nodes->get(value_hash->hash)); - OUTCOME_TRY(batch->put( - Space::kTrieValue, value_hash->hash, std::move(value))); - OUTCOME_TRY(batch->remove(Space::kTrieNode, value_hash->hash)); - migrated_values++; - } - } else { - small_values++; - } - total_values++; - OUTCOME_TRY(cursor->next()); - } - SL_VERBOSE(logger, - "total values: {}, migrated values: {}, small values: {}", - total_values, - migrated_values, - small_values); - OUTCOME_TRY(batch->commit()); - return outcome::success(); - } - - outcome::result separateTrieValues( - const blockchain::BlockTree &block_tree, - const trie::TrieStorage &trie_storage, - SpacedStorage &storage, - runtime::RuntimeUpgradeTracker &upgrade_tracker) { - auto logger = log::createLogger("Migration", "storage"); - SL_INFO(logger, - "Begin trie storage migration to separate nodes and values"); - if (storage.getSpace(Space::kTrieValue)->cursor()->isValid()) { - SL_INFO(logger, - "Stop trie storage migration, trie values column is not empty " - "(migration is not required)."); - return outcome::success(); - } - - std::deque pending; - { - OUTCOME_TRY(children, - block_tree.getChildren(block_tree.getLastFinalized().hash)); - std::ranges::copy(children, std::back_inserter(pending)); - } - while (!pending.empty()) { - primitives::BlockHash current = pending.front(); - 
pending.pop_front(); - auto header = block_tree.getBlockHeader(current); - if (!header) { - if (header.error() != blockchain::BlockTreeError::HEADER_NOT_FOUND) { - return header.error(); - } - continue; - } - OUTCOME_TRY(children, block_tree.getChildren(current)); - std::ranges::copy(children, std::back_inserter(pending)); - SL_VERBOSE(logger, "Migrating block {}...", header.value().blockInfo()); - OUTCOME_TRY(batch, - trie_storage.getEphemeralBatchAt(header.value().state_root)); - OUTCOME_TRY(migrateTree(storage, *batch, logger)); - } - - { - // we also certainly need the block with the runtime code - OUTCOME_TRY(upgrade_state, - upgrade_tracker.getLastCodeUpdateState( - block_tree.getLastFinalized())); - OUTCOME_TRY(block, - upgrade_tracker.getLastCodeUpdateBlockInfo(upgrade_state)); - SL_VERBOSE(logger, "Migrating block {}...", block); - - OUTCOME_TRY(batch, trie_storage.getEphemeralBatchAt(upgrade_state)); - OUTCOME_TRY(migrateTree(storage, *batch, logger)); - } - auto header = block_tree.getBlockHeader(block_tree.getLastFinalized().hash); - OUTCOME_TRY(header); - { - SL_VERBOSE( - logger, "Migrating block {}...", block_tree.getLastFinalized()); - - OUTCOME_TRY(batch, - trie_storage.getEphemeralBatchAt(header.value().state_root)); - OUTCOME_TRY(migrateTree(storage, *batch, logger)); - } - SL_INFO(logger, - "Essential blocks have been migrated. In case that other finalized " - "blocks are not required, the migration may be stopped, because it " - "will take a long time. 
It can be restarted later, if needed."); - header = block_tree.getBlockHeader(header.value().parent_hash); - - for (; header.has_value(); - header = block_tree.getBlockHeader(header.value().parent_hash)) { - SL_VERBOSE(logger, "Migrating block {}...", header.value().blockInfo()); - auto trie_batch_res = - trie_storage.getEphemeralBatchAt(header.value().state_root); - if (!trie_batch_res) { - SL_VERBOSE(logger, - "State trie for block #{} is absent, assume we've reached " - "fast-synced blocks.", - header.value().number); - break; - } - - OUTCOME_TRY(migrateTree(storage, *trie_batch_res.value(), logger)); - } - if (header.has_error() - && header.error() != blockchain::BlockTreeError::HEADER_NOT_FOUND) { - return header.error(); - } - - SL_INFO(logger, "Trie storage migration ended successfully"); - return outcome::success(); - } - - outcome::result runMigrations(injector::KagomeNodeInjector &injector) { - auto block_tree = injector.injectBlockTree(); - auto trie_storage = injector.injectTrieStorage(); - auto storage = injector.injectStorage(); - auto upgrade_tracker = injector.injectRuntimeUpgradeTracker(); - - OUTCOME_TRY(separateTrieValues( - *block_tree, *trie_storage, *storage, *upgrade_tracker)); - return outcome::success(); - } -} // namespace kagome::storage::migrations diff --git a/core/storage/migrations/migrations.hpp b/core/storage/migrations/migrations.hpp deleted file mode 100644 index 1be8146452..0000000000 --- a/core/storage/migrations/migrations.hpp +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright Quadrivium LLC - * All Rights Reserved - * SPDX-License-Identifier: Apache-2.0 - */ - -#pragma once - -#include -#include "injector/application_injector.hpp" - -namespace kagome { - namespace blockchain { - class BlockTree; - } - namespace storage::trie { - class TrieStorage; - class TrieStorageBackend; - } // namespace storage::trie - namespace injector { - class KagomeNodeInjector; - } -} // namespace kagome - -namespace kagome::storage::migrations { - - 
outcome::result separateTrieValues( - const blockchain::BlockTree &block_tree, - const trie::TrieStorage &trie_storage, - trie::TrieStorageBackend &trie_backend); - - outcome::result runMigrations(injector::KagomeNodeInjector &injector); - -} // namespace kagome::storage::migrations diff --git a/core/storage/rocksdb/rocksdb.cpp b/core/storage/rocksdb/rocksdb.cpp index 104050e18f..b4c1f0a523 100644 --- a/core/storage/rocksdb/rocksdb.cpp +++ b/core/storage/rocksdb/rocksdb.cpp @@ -5,14 +5,14 @@ */ #include "storage/rocksdb/rocksdb.hpp" -#include #include #include -#include +#include +#include +#include #include "filesystem/common.hpp" -#include "storage/buffer_map_types.hpp" #include "storage/database_error.hpp" #include "storage/rocksdb/rocksdb_batch.hpp" #include "storage/rocksdb/rocksdb_cursor.hpp" @@ -23,6 +23,38 @@ namespace kagome::storage { namespace fs = std::filesystem; + rocksdb::ColumnFamilyOptions configureColumn(uint32_t memory_budget) { + rocksdb::ColumnFamilyOptions options; + options.OptimizeLevelStyleCompaction(memory_budget); + auto table_options = RocksDb::tableOptionsConfiguration(); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + return options; + } + + template + void configureColumnFamilies( + std::vector &column_family_descriptors, + std::vector &ttls, + ColumnFamilyNames &&cf_names, + const std::unordered_map &column_ttl, + uint32_t trie_space_cache_size, + uint32_t other_spaces_cache_size, + log::Logger &log) { + for (auto &space_name : std::forward(cf_names)) { + auto ttl = 0; + if (const auto it = column_ttl.find(space_name); it != column_ttl.end()) { + ttl = it->second; + } + column_family_descriptors.emplace_back( + space_name, + configureColumn(space_name != spaceName(Space::kTrieNode) + ? 
other_spaces_cache_size + : trie_space_cache_size)); + ttls.push_back(ttl); + SL_DEBUG(log, "Column family {} configured with TTL {}", space_name, ttl); + } + } + RocksDb::RocksDb() : logger_(log::createLogger("RocksDB", "storage")) { ro_.fill_cache = false; } @@ -33,6 +65,7 @@ namespace kagome::storage { } delete db_; } + outcome::result> RocksDb::create( const filesystem::path &path, rocksdb::Options options, @@ -40,7 +73,7 @@ namespace kagome::storage { bool prevent_destruction, const std::unordered_map &column_ttl, bool enable_migration) { - const auto no_db_exists = not fs::exists(path); + const auto no_db_presented = not fs::exists(path); OUTCOME_TRY(mkdirs(path)); auto log = log::createLogger("RocksDB", "storage"); @@ -54,15 +87,6 @@ namespace kagome::storage { const uint32_t other_spaces_cache_size = (memory_budget - trie_space_cache_size) / (storage::Space::kTotal - 1); - std::vector column_family_descriptors; - std::vector ttls; - configureColumnFamilies(column_family_descriptors, - ttls, - column_ttl, - trie_space_cache_size, - other_spaces_cache_size, - log); - std::vector existing_families; auto res = rocksdb::DB::ListColumnFamilies( options, path.native(), &existing_families); @@ -74,12 +98,41 @@ namespace kagome::storage { return status_as_error(res); } + std::unordered_set all_families; + + auto required_families = + std::views::iota(0, Space::kTotal) | std::views::transform([](int i) { + return spaceName(static_cast(i)); + }); + std::ranges::copy(required_families, + std::inserter(all_families, all_families.end())); + + for (auto &existing_family : existing_families) { + auto [_, was_inserted] = all_families.insert(existing_family); + if (was_inserted) { + SL_WARN(log, + "Column family '{}' present in database but not used by " + "KAGOME, probably obsolete.", + existing_family); + } + } + + std::vector column_family_descriptors; + std::vector ttls; + configureColumnFamilies(column_family_descriptors, + ttls, + all_families, + column_ttl, + 
trie_space_cache_size, + other_spaces_cache_size, + log); + options.create_missing_column_families = true; auto rocks_db = std::shared_ptr(new RocksDb); const auto ttl_migrated_path = path.parent_path() / "ttl_migrated"; const auto ttl_migrated_exists = fs::exists(ttl_migrated_path); - if (no_db_exists or ttl_migrated_exists) { + if (no_db_presented or ttl_migrated_exists) { OUTCOME_TRY(openDatabaseWithTTL(options, path, column_family_descriptors, @@ -126,28 +179,6 @@ namespace kagome::storage { return outcome::success(); } - void RocksDb::configureColumnFamilies( - std::vector &column_family_descriptors, - std::vector &ttls, - const std::unordered_map &column_ttl, - uint32_t trie_space_cache_size, - uint32_t other_spaces_cache_size, - log::Logger &log) { - for (auto i = 0; i < Space::kTotal; ++i) { - const auto space_name = spaceName(static_cast(i)); - auto ttl = 0; - if (const auto it = column_ttl.find(space_name); it != column_ttl.end()) { - ttl = it->second; - } - column_family_descriptors.emplace_back( - space_name, - configureColumn(i != Space::kTrieNode ? 
other_spaces_cache_size - : trie_space_cache_size)); - ttls.push_back(ttl); - SL_DEBUG(log, "Column family {} configured with TTL {}", space_name, ttl); - } - } - outcome::result RocksDb::openDatabaseWithTTL( const rocksdb::Options &options, const filesystem::path &path, @@ -171,12 +202,6 @@ namespace kagome::storage { status.ToString()); return status_as_error(status); } - for (auto *handle : rocks_db->column_family_handles_) { - auto space = spaceByName(handle->GetName()); - BOOST_ASSERT(space.has_value()); - rocks_db->spaces_[*space] = std::make_shared( - rocks_db->weak_from_this(), *space, rocks_db->logger_); - } if (not fs::exists(ttl_migrated_path)) { std::ofstream file(ttl_migrated_path.native()); if (not file) { @@ -200,7 +225,7 @@ namespace kagome::storage { const filesystem::path &ttl_migrated_path, log::Logger &log) { rocksdb::DB *db_raw = nullptr; - std::vector column_family_handles; + std::vector column_family_handles; auto status = rocksdb::DB::Open(options, path.native(), column_family_descriptors, @@ -217,7 +242,7 @@ namespace kagome::storage { auto defer_db = std::make_unique(db, column_family_handles, log); - std::vector column_family_handles_with_ttl; + std::vector column_family_handles_with_ttl; const auto ttl_path = path.parent_path() / "db_ttl"; std::error_code ec; fs::create_directories(ttl_path, ec); @@ -289,39 +314,68 @@ namespace kagome::storage { ec); return DatabaseError::IO_ERROR; } - - return openDatabaseWithTTL(options, - path, - column_family_descriptors, - ttls, - rocks_db, - ttl_migrated_path, - log); + status = rocksdb::DBWithTTL::Open(options, + path.native(), + column_family_descriptors, + &rocks_db->column_family_handles_, + &rocks_db->db_, + ttls); + if (not status.ok()) { + SL_ERROR(log, + "Can't open database in {}: {}", + path.native(), + status.ToString()); + return status_as_error(status); + } + std::ofstream file(ttl_migrated_path.native()); + if (not file) { + SL_ERROR( + log, "Can't create file {} for database", 
ttl_migrated_path.native()); + return DatabaseError::IO_ERROR; + } + file.close(); + return outcome::success(); } - std::shared_ptr RocksDb::getSpace(Space space) { - auto it = spaces_.find(space); - BOOST_ASSERT(it != spaces_.end()); - return it->second; + std::shared_ptr RocksDb::getSpace(Space space) { + if (spaces_.contains(space)) { + return spaces_[space]; + } + auto space_name = spaceName(space); + auto column = std::ranges::find_if( + column_family_handles_, + [&space_name](const ColumnFamilyHandlePtr &handle) { + return handle->GetName() == space_name; + }); + if (column_family_handles_.end() == column) { + throw DatabaseError::INVALID_ARGUMENT; + } + auto space_ptr = + std::make_shared(weak_from_this(), *column, logger_); + spaces_[space] = space_ptr; + return space_ptr; } - outcome::result RocksDb::dropColumn(kagome::storage::Space space) { - auto column = getCFHandle(space); - auto check_status = - [this](const rocksdb::Status &status) -> outcome::result { + void RocksDb::dropColumn(kagome::storage::Space space) { + auto space_name = spaceName(space); + auto column_it = std::ranges::find_if( + column_family_handles_, + [&space_name](const ColumnFamilyHandlePtr &handle) { + return handle->GetName() == space_name; + }); + if (column_family_handles_.end() == column_it) { + throw DatabaseError::INVALID_ARGUMENT; + } + auto &handle = *column_it; + auto e = [this](const rocksdb::Status &status) { if (!status.ok()) { logger_->error("DB operation failed: {}", status.ToString()); - return status_as_error(status); + throw status_as_error(status); } - return outcome::success(); }; - OUTCOME_TRY(check_status(db_->DropColumnFamily(column))); - OUTCOME_TRY(check_status(db_->DestroyColumnFamilyHandle(column))); - rocksdb::ColumnFamilyHandle *new_handle{}; - OUTCOME_TRY(check_status( - db_->CreateColumnFamily({}, spaceName(space), &new_handle))); - column_family_handles_[static_cast(space)] = new_handle; - return outcome::success(); + e(db_->DropColumnFamily(handle)); 
+ e(db_->DestroyColumnFamilyHandle(handle)); + e(db_->CreateColumnFamily({}, space_name, &handle)); } rocksdb::BlockBasedTableOptions RocksDb::tableOptionsConfiguration( @@ -329,35 +383,13 @@ namespace kagome::storage { rocksdb::BlockBasedTableOptions table_options; table_options.format_version = 5; table_options.block_cache = rocksdb::NewLRUCache( - static_cast(lru_cache_size_mib) * 1024 * 1024); - table_options.block_size = static_cast(block_size_kib) * 1024; + static_cast(lru_cache_size_mib * 1024 * 1024)); + table_options.block_size = static_cast(block_size_kib * 1024); table_options.cache_index_and_filter_blocks = true; table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false)); return table_options; } - rocksdb::ColumnFamilyHandle *RocksDb::getCFHandle(Space space) { - BOOST_ASSERT_MSG(static_cast(space) < column_family_handles_.size(), - "All spaces should have an associated column family"); - auto handle = column_family_handles_[static_cast(space)]; - BOOST_ASSERT(handle != nullptr); - return handle; - } - - rocksdb::ColumnFamilyOptions RocksDb::configureColumn( - uint32_t memory_budget) { - rocksdb::ColumnFamilyOptions options; - options.OptimizeLevelStyleCompaction(memory_budget); - auto table_options = tableOptionsConfiguration(); - options.table_factory.reset(NewBlockBasedTableFactory(table_options)); - return options; - } - - std::unique_ptr RocksDb::createBatch() { - return std::make_unique(shared_from_this(), - getCFHandle(Space::kDefault)); - } - RocksDb::DatabaseGuard::DatabaseGuard( std::shared_ptr db, std::vector column_family_handles, @@ -406,12 +438,16 @@ namespace kagome::storage { } RocksDbSpace::RocksDbSpace(std::weak_ptr storage, - Space space, + const RocksDb::ColumnFamilyHandlePtr &column, log::Logger logger) : storage_{std::move(storage)}, - space_{space}, + column_{column}, logger_{std::move(logger)} {} + std::unique_ptr RocksDbSpace::batch() { + return std::make_unique(*this); + } + std::optional 
RocksDbSpace::byteSizeHint() const { auto rocks = storage_.lock(); if (!rocks) { @@ -435,31 +471,20 @@ namespace kagome::storage { return usage_bytes; } - std::unique_ptr> RocksDbSpace::batch() { - auto rocks = storage_.lock(); - if (!rocks) { - throw DatabaseError::STORAGE_GONE; - } - auto batch = - std::make_unique(rocks, rocks->getCFHandle(space_)); - return batch; - } - std::unique_ptr RocksDbSpace::cursor() { auto rocks = storage_.lock(); if (!rocks) { throw DatabaseError::STORAGE_GONE; } auto it = std::unique_ptr( - rocks->db_->NewIterator(rocks->ro_, rocks->getCFHandle(space_))); + rocks->db_->NewIterator(rocks->ro_, column_)); return std::make_unique(std::move(it)); } outcome::result RocksDbSpace::contains(const BufferView &key) const { OUTCOME_TRY(rocks, use()); std::string value; - auto status = rocks->db_->Get( - rocks->ro_, rocks->getCFHandle(space_), make_slice(key), &value); + auto status = rocks->db_->Get(rocks->ro_, column_, make_slice(key), &value); if (status.ok()) { return true; } @@ -474,8 +499,7 @@ namespace kagome::storage { outcome::result RocksDbSpace::get(const BufferView &key) const { OUTCOME_TRY(rocks, use()); std::string value; - auto status = rocks->db_->Get( - rocks->ro_, rocks->getCFHandle(space_), make_slice(key), &value); + auto status = rocks->db_->Get(rocks->ro_, column_, make_slice(key), &value); if (status.ok()) { // cannot move string content to a buffer return Buffer( @@ -489,8 +513,7 @@ namespace kagome::storage { const BufferView &key) const { OUTCOME_TRY(rocks, use()); std::string value; - auto status = rocks->db_->Get( - rocks->ro_, rocks->getCFHandle(space_), make_slice(key), &value); + auto status = rocks->db_->Get(rocks->ro_, column_, make_slice(key), &value); if (status.ok()) { auto buf = Buffer( reinterpret_cast(value.data()), // NOLINT @@ -508,10 +531,8 @@ namespace kagome::storage { outcome::result RocksDbSpace::put(const BufferView &key, BufferOrView &&value) { OUTCOME_TRY(rocks, use()); - auto status = 
rocks->db_->Put(rocks->wo_, - rocks->getCFHandle(space_), - make_slice(key), - make_slice(std::move(value))); + auto status = rocks->db_->Put( + rocks->wo_, column_, make_slice(key), make_slice(std::move(value))); if (status.ok()) { return outcome::success(); } @@ -521,8 +542,7 @@ namespace kagome::storage { outcome::result RocksDbSpace::remove(const BufferView &key) { OUTCOME_TRY(rocks, use()); - auto status = rocks->db_->Delete( - rocks->wo_, rocks->getCFHandle(space_), make_slice(key)); + auto status = rocks->db_->Delete(rocks->wo_, column_, make_slice(key)); if (status.ok()) { return outcome::success(); } @@ -537,15 +557,15 @@ namespace kagome::storage { } if (rocks->db_) { std::unique_ptr begin( - rocks->db_->NewIterator(rocks->ro_, rocks->getCFHandle(space_))); + rocks->db_->NewIterator(rocks->ro_, column_)); first.empty() ? begin->SeekToFirst() : begin->Seek(make_slice(first)); auto bk = begin->key(); std::unique_ptr end( - rocks->db_->NewIterator(rocks->ro_, rocks->getCFHandle(space_))); + rocks->db_->NewIterator(rocks->ro_, column_)); last.empty() ? 
end->SeekToLast() : end->Seek(make_slice(last)); auto ek = end->key(); rocksdb::CompactRangeOptions options; - rocks->db_->CompactRange(options, rocks->getCFHandle(space_), &bk, &ek); + rocks->db_->CompactRange(options, column_, &bk, &ek); } } diff --git a/core/storage/rocksdb/rocksdb.hpp b/core/storage/rocksdb/rocksdb.hpp index 79969870fb..6d2e66369c 100644 --- a/core/storage/rocksdb/rocksdb.hpp +++ b/core/storage/rocksdb/rocksdb.hpp @@ -6,7 +6,6 @@ #pragma once -#include "common/buffer.hpp" #include "storage/buffer_map_types.hpp" #include @@ -16,14 +15,15 @@ #include "filesystem/common.hpp" #include "log/logger.hpp" -#include "storage/face/batch_writeable.hpp" -#include "storage/rocksdb/rocksdb_spaces.hpp" #include "storage/spaced_storage.hpp" namespace kagome::storage { class RocksDb : public SpacedStorage, public std::enable_shared_from_this { + private: + using ColumnFamilyHandlePtr = rocksdb::ColumnFamilyHandle *; + public: ~RocksDb() override; @@ -53,16 +53,14 @@ namespace kagome::storage { const std::unordered_map &column_ttl = {}, bool enable_migration = true); - std::shared_ptr getSpace(Space space) override; - - std::unique_ptr createBatch() override; + std::shared_ptr getSpace(Space space) override; /** * Implementation specific way to erase the whole space data. * Not exposed at SpacedStorage level as only used in pruner. 
* @param space - storage space identifier to clear */ - outcome::result dropColumn(Space space); + void dropColumn(Space space); /** * Prepare configuration structure @@ -100,20 +98,9 @@ namespace kagome::storage { RocksDb(); - rocksdb::ColumnFamilyHandle *getCFHandle(Space space); - - static rocksdb::ColumnFamilyOptions configureColumn(uint32_t memory_budget); static outcome::result createDirectory( const std::filesystem::path &absolute_path, log::Logger &log); - static void configureColumnFamilies( - std::vector &column_family_descriptors, - std::vector &ttls, - const std::unordered_map &column_ttl, - uint32_t trie_space_cache_size, - uint32_t other_spaces_cache_size, - log::Logger &log); - static outcome::result openDatabaseWithTTL( const rocksdb::Options &options, const filesystem::path &path, @@ -135,25 +122,24 @@ namespace kagome::storage { log::Logger &log); rocksdb::DBWithTTL *db_{}; - std::vector column_family_handles_; - boost::container::flat_map> - spaces_; + std::vector column_family_handles_; + boost::container::flat_map> spaces_; rocksdb::ReadOptions ro_; rocksdb::WriteOptions wo_; log::Logger logger_; }; - class RocksDbSpace : public BufferBatchableStorage { + class RocksDbSpace : public BufferStorage { public: ~RocksDbSpace() override = default; RocksDbSpace(std::weak_ptr storage, - Space space, + const RocksDb::ColumnFamilyHandlePtr &column, log::Logger logger); - std::optional byteSizeHint() const override; + std::unique_ptr batch() override; - std::unique_ptr> batch() override; + std::optional byteSizeHint() const override; std::unique_ptr cursor() override; @@ -172,14 +158,14 @@ namespace kagome::storage { void compact(const Buffer &first, const Buffer &last); friend class RocksDbBatch; - friend class RocksDb; private: // gather storage instance from weak ptr outcome::result> use() const; std::weak_ptr storage_; - Space space_; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const RocksDb::ColumnFamilyHandlePtr 
&column_; log::Logger logger_; }; } // namespace kagome::storage diff --git a/core/storage/rocksdb/rocksdb_batch.cpp b/core/storage/rocksdb/rocksdb_batch.cpp index 505fbab55b..4910dfad84 100644 --- a/core/storage/rocksdb/rocksdb_batch.cpp +++ b/core/storage/rocksdb/rocksdb_batch.cpp @@ -11,39 +11,30 @@ namespace kagome::storage { - RocksDbBatch::RocksDbBatch(std::shared_ptr db, - rocksdb::ColumnFamilyHandle *default_cf) - : db_(std::move(db)), default_cf_(default_cf) { - BOOST_ASSERT(db_ != nullptr); - BOOST_ASSERT(default_cf_ != nullptr); - } + RocksDbBatch::RocksDbBatch(RocksDbSpace &db) : db_(db) {} outcome::result RocksDbBatch::put(const BufferView &key, BufferOrView &&value) { - return status_as_result( - batch_.Put(default_cf_, make_slice(key), make_slice(std::move(value)))); - } - - outcome::result RocksDbBatch::put(Space space, - const BufferView &key, - BufferOrView &&value) { - auto handle = db_->getCFHandle(space); - return status_as_result( - batch_.Put(handle, make_slice(key), make_slice(std::move(value)))); + batch_.Put(db_.column_, make_slice(key), make_slice(std::move(value))); + return outcome::success(); } outcome::result RocksDbBatch::remove(const BufferView &key) { - return status_as_result(batch_.Delete(default_cf_, make_slice(key))); - } - - outcome::result RocksDbBatch::remove(Space space, - const BufferView &key) { - auto handle = db_->getCFHandle(space); - return status_as_result(batch_.Delete(handle, make_slice(key))); + batch_.Delete(db_.column_, make_slice(key)); + return outcome::success(); } outcome::result RocksDbBatch::commit() { - return status_as_result(db_->db_->Write(db_->wo_, &batch_)); + auto rocks = db_.storage_.lock(); + if (!rocks) { + return DatabaseError::STORAGE_GONE; + } + auto status = rocks->db_->Write(rocks->wo_, &batch_); + if (status.ok()) { + return outcome::success(); + } + + return status_as_error(status); } void RocksDbBatch::clear() { diff --git a/core/storage/rocksdb/rocksdb_batch.hpp 
b/core/storage/rocksdb/rocksdb_batch.hpp index e7039a1b1f..e19476a5a6 100644 --- a/core/storage/rocksdb/rocksdb_batch.hpp +++ b/core/storage/rocksdb/rocksdb_batch.hpp @@ -6,22 +6,16 @@ #pragma once -#include #include - -#include "common/buffer.hpp" -#include "storage/buffer_map_types.hpp" -#include "storage/face/write_batch.hpp" #include "storage/rocksdb/rocksdb.hpp" namespace kagome::storage { - class RocksDbBatch : public BufferSpacedBatch, public BufferBatch { + class RocksDbBatch : public BufferBatch { public: ~RocksDbBatch() override = default; - explicit RocksDbBatch(std::shared_ptr db, - rocksdb::ColumnFamilyHandle *default_cf); + explicit RocksDbBatch(RocksDbSpace &db); outcome::result commit() override; @@ -30,16 +24,11 @@ namespace kagome::storage { outcome::result put(const BufferView &key, BufferOrView &&value) override; - outcome::result put(Space space, - const BufferView &key, - BufferOrView &&value) override; - outcome::result remove(const BufferView &key) override; - outcome::result remove(Space space, const BufferView &key) override; private: - std::shared_ptr db_; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + RocksDbSpace &db_; rocksdb::WriteBatch batch_; - rocksdb::ColumnFamilyHandle *default_cf_; }; } // namespace kagome::storage diff --git a/core/storage/rocksdb/rocksdb_spaces.cpp b/core/storage/rocksdb/rocksdb_spaces.cpp index 36a33cdba1..bbffd72962 100644 --- a/core/storage/rocksdb/rocksdb_spaces.cpp +++ b/core/storage/rocksdb/rocksdb_spaces.cpp @@ -11,45 +11,29 @@ #include #include -#include namespace kagome::storage { - static constexpr std::array kSpaceNames{"lookup_key", - "header", - "block_body", - "justification", - "trie_node", - "trie_value", - "dispute_data", - "beefy_justification", - "avaliability_storage", - "audi_peers", - }; std::string spaceName(Space space) { - static_assert(kSpaceNames.size() == Space::kTotal - 1); + static constexpr std::array kNames{"lookup_key", + "header", + "block_body", + 
"justification", + "trie_node", + "dispute_data", + "beefy_justification", + "avaliability_storage", + "audi_peers"}; + static_assert(kNames.size() == Space::kTotal - 1); static const std::vector names = []() { std::vector names; names.push_back(rocksdb::kDefaultColumnFamilyName); - names.insert(names.end(), kSpaceNames.begin(), kSpaceNames.end()); + names.insert(names.end(), kNames.begin(), kNames.end()); return names; }(); BOOST_ASSERT(space < Space::kTotal); return names.at(space); } - std::optional spaceByName(std::string_view space) { - if (space == rocksdb::kDefaultColumnFamilyName) { - return Space::kDefault; - } - auto it = std::ranges::find(kSpaceNames, space); - if (it == kSpaceNames.end()) { - return std::nullopt; - } - auto idx = it - kSpaceNames.begin() + 1; - BOOST_ASSERT(idx < Space::kTotal); - return static_cast(idx); - } - } // namespace kagome::storage diff --git a/core/storage/rocksdb/rocksdb_spaces.hpp b/core/storage/rocksdb/rocksdb_spaces.hpp index 2ef87c36ca..41d8bc4039 100644 --- a/core/storage/rocksdb/rocksdb_spaces.hpp +++ b/core/storage/rocksdb/rocksdb_spaces.hpp @@ -8,19 +8,15 @@ #include "storage/spaces.hpp" -#include #include namespace kagome::storage { /** - * Get a space name as a string + * Map space item to its string name for Rocks DB needs + * @param space - space identifier + * @return string representation of space name */ std::string spaceName(Space space); - /** - * Get the space by its name - */ - std::optional spaceByName(std::string_view space); - } // namespace kagome::storage diff --git a/core/storage/rocksdb/rocksdb_util.hpp b/core/storage/rocksdb/rocksdb_util.hpp index 249f3fb456..162fbc9120 100644 --- a/core/storage/rocksdb/rocksdb_util.hpp +++ b/core/storage/rocksdb/rocksdb_util.hpp @@ -7,14 +7,10 @@ #pragma once #include -#include - #include "common/buffer.hpp" -#include "log/logger.hpp" #include "storage/database_error.hpp" namespace kagome::storage { - inline DatabaseError status_as_error(const rocksdb::Status &s) 
{ if (s.IsNotFound()) { return DatabaseError::NOT_FOUND; @@ -41,13 +37,6 @@ namespace kagome::storage { return DatabaseError::UNKNOWN; } - inline outcome::result status_as_result(const rocksdb::Status &s) { - if (s.ok()) { - return outcome::success(); - } - return status_as_error(s); - } - inline rocksdb::Slice make_slice(const common::BufferView &buf) { // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast) const auto *ptr = reinterpret_cast(buf.data()); diff --git a/core/storage/spaced_storage.hpp b/core/storage/spaced_storage.hpp index cff8e1be94..8d8445d4fc 100644 --- a/core/storage/spaced_storage.hpp +++ b/core/storage/spaced_storage.hpp @@ -24,13 +24,7 @@ namespace kagome::storage { * @param space - identifier of required space * @return a pointer buffer storage for a space */ - virtual std::shared_ptr getSpace(Space space) = 0; - - /** - * Retrieve a batch to write into the storage atomically - * @return a new batch - */ - virtual std::unique_ptr createBatch() = 0; + virtual std::shared_ptr getSpace(Space space) = 0; }; } // namespace kagome::storage diff --git a/core/storage/spaces.hpp b/core/storage/spaces.hpp index a18f8f81ca..45cce8598e 100644 --- a/core/storage/spaces.hpp +++ b/core/storage/spaces.hpp @@ -20,7 +20,6 @@ namespace kagome::storage { kBlockBody, kJustification, kTrieNode, - kTrieValue, kDisputeData, kBeefyJustification, kAvaliabilityStorage, diff --git a/core/storage/trie/child_prefix.hpp b/core/storage/trie/child_prefix.hpp index 6ef8b1a97e..49319bdc1f 100644 --- a/core/storage/trie/child_prefix.hpp +++ b/core/storage/trie/child_prefix.hpp @@ -19,7 +19,7 @@ namespace kagome::storage::trie { void match(uint8_t nibble); void match(common::BufferView nibbles); - explicit operator bool() const; + operator bool() const; bool done() const; diff --git a/core/storage/trie/compact_decode.cpp b/core/storage/trie/compact_decode.cpp index 631d785787..99d79fe9b6 100644 --- a/core/storage/trie/compact_decode.cpp +++ 
b/core/storage/trie/compact_decode.cpp @@ -15,8 +15,6 @@ OUTCOME_CPP_DEFINE_CATEGORY(kagome::storage::trie, CompactDecodeError, e) { switch (e) { case E::INCOMPLETE_PROOF: return "incomplete proof"; - case E::NULL_BRANCH: - return "Unexpected null branch"; } abort(); } @@ -50,23 +48,21 @@ namespace kagome::storage::trie { db.emplace(hash, std::make_pair(std::move(value), nullptr)); node->setValue({std::nullopt, hash}); } - OUTCOME_TRY(cursor.push({ + cursor.push({ .node = node, .branch = 0, .child = false, .t = {}, - })); + }); return outcome::success(); }; OUTCOME_TRY(push()); while (not cursor.stack.empty()) { - OUTCOME_TRY(cursor.branchInit()); - while (not cursor.branch_end) { + for (cursor.branchInit(); not cursor.branch_end; cursor.branchNext()) { if (not cursor.branch_merkle) { - return CompactDecodeError::NULL_BRANCH; + throw std::logic_error{"compactDecode branch_merkle=null"}; } if (not cursor.branch_merkle->empty()) { - OUTCOME_TRY(cursor.branchNext()); continue; } OUTCOME_TRY(push()); @@ -77,10 +73,10 @@ namespace kagome::storage::trie { OUTCOME_TRY(raw, codec.encodeNode(*node, StateVersion::V0)); auto hash = codec.hash256(raw); db[hash] = {std::move(raw), std::move(node)}; - OUTCOME_TRY(cursor.pop()); + cursor.pop(); if (not cursor.stack.empty()) { *cursor.branch_merkle = hash; - OUTCOME_TRY(cursor.branchNext()); + cursor.branchNext(); } } } diff --git a/core/storage/trie/compact_decode.hpp b/core/storage/trie/compact_decode.hpp index 8aacc222f7..f1b3889f35 100644 --- a/core/storage/trie/compact_decode.hpp +++ b/core/storage/trie/compact_decode.hpp @@ -16,7 +16,6 @@ namespace kagome::storage::trie { enum class CompactDecodeError : uint8_t { INCOMPLETE_PROOF = 1, - NULL_BRANCH, }; using CompactDecoded = diff --git a/core/storage/trie/compact_encode.cpp b/core/storage/trie/compact_encode.cpp index ee7c18c9ca..c0e8bbe2c4 100644 --- a/core/storage/trie/compact_encode.cpp +++ b/core/storage/trie/compact_encode.cpp @@ -6,25 +6,11 @@ #include 
"storage/trie/compact_encode.hpp" -#include #include #include "storage/trie/raw_cursor.hpp" #include "storage/trie/serialization/polkadot_codec.hpp" -OUTCOME_CPP_DEFINE_CATEGORY(kagome::storage::trie, RawCursorError, e) { - using E = kagome::storage::trie::RawCursorError; - switch (e) { - case E::EmptyStack: - return "Unexpected empty stack"; - case E::ChildBranchNotFound: - return "Expected child branch is not found"; - case E::StackBackIsNotBranch: - return "No branch at the end of the stack"; - } - return "Unknown RawCursorError"; -} - namespace kagome::storage::trie { outcome::result compactEncode(const OnRead &db, const common::Hash256 &root) { @@ -34,7 +20,7 @@ namespace kagome::storage::trie { level.child = {}; std::unordered_set seen, value_seen; std::vector> proofs(2); - auto push = [&](const common::Hash256 &hash) -> outcome::result { + auto push = [&](const common::Hash256 &hash) { auto it = db.db.find(hash); if (it == db.db.end()) { return false; @@ -56,11 +42,11 @@ namespace kagome::storage::trie { } auto &level = levels.back(); auto &proof = proofs[levels.size() - 1]; - OUTCOME_TRY(level.push({ + level.push({ .node = std::move(node), .child = level.child, .t = proof.size(), - })); + }); proof.emplace_back(); if (compact) { proof.back().putUint8(kEscapeCompactHeader); @@ -68,7 +54,7 @@ namespace kagome::storage::trie { } return true; }; - OUTCOME_TRY(push(root)); + push(root); while (not levels.empty()) { auto &level = levels.back(); auto pop_level = true; @@ -76,27 +62,24 @@ namespace kagome::storage::trie { auto child = level.value_child; if (child and seen.emplace(*child).second) { levels.emplace_back(); - OUTCOME_TRY(push(*child)); + push(*child); pop_level = false; break; } - OUTCOME_TRY(level.branchInit()); - while (not level.branch_end) { - OUTCOME_TRY(push_success, push(*level.branch_hash)); + for (level.branchInit(); not level.branch_end; level.branchNext()) { if (level.branch_hash and seen.emplace(*level.branch_hash).second - and 
push_success) { + and push(*level.branch_hash)) { break; } - OUTCOME_TRY(level.branchNext()); } if (level.branch_end) { auto &item = level.stack.back(); auto &proof = proofs[levels.size() - 1][item.t]; proof.put(codec.encodeNode(*item.node, StateVersion::V0).value()); - OUTCOME_TRY(level.pop()); + level.pop(); if (not level.stack.empty()) { *level.branch_merkle = MerkleValue::create({}).value(); - OUTCOME_TRY(level.branchNext()); + level.branchNext(); } } } diff --git a/core/storage/trie/impl/topper_trie_batch_impl.cpp b/core/storage/trie/impl/topper_trie_batch_impl.cpp index 260fc2472f..48813eec8d 100644 --- a/core/storage/trie/impl/topper_trie_batch_impl.cpp +++ b/core/storage/trie/impl/topper_trie_batch_impl.cpp @@ -9,11 +9,8 @@ #include #include "common/buffer.hpp" -#include "crypto/blake2/blake2b.h" -#include "storage/face/generic_maps.hpp" #include "storage/trie/polkadot_trie/polkadot_trie_cursor.hpp" #include "storage/trie/polkadot_trie/trie_error.hpp" -#include "storage/trie/polkadot_trie/trie_node.hpp" OUTCOME_CPP_DEFINE_CATEGORY(kagome::storage::trie, TopperTrieBatchImpl::Error, @@ -119,7 +116,7 @@ namespace kagome::storage::trie { } outcome::result TopperTrieBatchImpl::apply( - face::Writeable &map) { + storage::BufferStorage &map) { for (auto &[k, v] : cache_) { if (v) { OUTCOME_TRY(map.put(k, BufferView{*v})); @@ -164,7 +161,7 @@ namespace kagome::storage::trie { } bool TopperTrieCursor::isValid() const { - return static_cast(choice_); + return choice_; } outcome::result TopperTrieCursor::next() { @@ -186,20 +183,6 @@ namespace kagome::storage::trie { : parent_cursor_->value(); } - std::optional TopperTrieCursor::valueHash() - const { - auto value_opt = value(); - if (!value_opt) { - return std::nullopt; - } - if (value_opt->size() >= Hash256::size() + 1) { - return ValueHash{.hash = crypto::blake2b<32>(*value_opt), .small = false}; - } - Hash256 value_as_hash{}; - std::copy(value_opt->begin(), value_opt->end(), value_as_hash.begin()); - return 
ValueHash{.hash = value_as_hash, .small = true}; - } - outcome::result TopperTrieCursor::seekLowerBound( const BufferView &key) { OUTCOME_TRY(parent_cursor_->seekLowerBound(key)); diff --git a/core/storage/trie/impl/topper_trie_batch_impl.hpp b/core/storage/trie/impl/topper_trie_batch_impl.hpp index 1590e6a610..97bd487ea4 100644 --- a/core/storage/trie/impl/topper_trie_batch_impl.hpp +++ b/core/storage/trie/impl/topper_trie_batch_impl.hpp @@ -6,11 +6,6 @@ #pragma once -#include "common/blob.hpp" -#include "common/buffer.hpp" -#include "storage/buffer_map_types.hpp" -#include "storage/face/writeable.hpp" -#include "storage/trie/polkadot_trie/trie_node.hpp" #include "storage/trie/trie_batches.hpp" #include @@ -53,7 +48,7 @@ namespace kagome::storage::trie { outcome::result>> createChildBatch( common::BufferView path) override; - outcome::result apply(face::Writeable &map); + outcome::result apply(storage::BufferStorage &map); private: std::map> cache_; @@ -80,7 +75,6 @@ namespace kagome::storage::trie { outcome::result prev() override; std::optional key() const override; std::optional value() const override; - std::optional valueHash() const override; outcome::result seekLowerBound(const BufferView &key) override; outcome::result seekUpperBound(const BufferView &key) override; @@ -94,7 +88,7 @@ namespace kagome::storage::trie { struct Choice { Choice(bool parent, bool overlay) : parent{parent}, overlay{overlay} {} - explicit operator bool() const { + operator bool() const { return parent || overlay; } diff --git a/core/storage/trie/impl/trie_storage_backend_batch.cpp b/core/storage/trie/impl/trie_storage_backend_batch.cpp new file mode 100644 index 0000000000..17e1393693 --- /dev/null +++ b/core/storage/trie/impl/trie_storage_backend_batch.cpp @@ -0,0 +1,35 @@ +/** + * Copyright Quadrivium LLC + * All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "storage/trie/impl/trie_storage_backend_batch.hpp" + +namespace kagome::storage::trie { + + 
TrieStorageBackendBatch::TrieStorageBackendBatch( + std::unique_ptr storage_batch) + : storage_batch_{std::move(storage_batch)} { + BOOST_ASSERT(storage_batch_ != nullptr); + } + + outcome::result TrieStorageBackendBatch::commit() { + return storage_batch_->commit(); + } + + void TrieStorageBackendBatch::clear() { + storage_batch_->clear(); + } + + outcome::result TrieStorageBackendBatch::put( + const common::BufferView &key, BufferOrView &&value) { + return storage_batch_->put(key, std::move(value)); + } + + outcome::result TrieStorageBackendBatch::remove( + const common::BufferView &key) { + return storage_batch_->remove(key); + } + +} // namespace kagome::storage::trie diff --git a/core/storage/trie/impl/trie_storage_backend_batch.hpp b/core/storage/trie/impl/trie_storage_backend_batch.hpp new file mode 100644 index 0000000000..4785d530d1 --- /dev/null +++ b/core/storage/trie/impl/trie_storage_backend_batch.hpp @@ -0,0 +1,34 @@ +/** + * Copyright Quadrivium LLC + * All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include "storage/buffer_map_types.hpp" + +namespace kagome::storage::trie { + + /** + * Batch implementation for TrieStorageBackend + * @see TrieStorageBackend + */ + class TrieStorageBackendBatch : public BufferBatch { + public: + TrieStorageBackendBatch(std::unique_ptr storage_batch); + ~TrieStorageBackendBatch() override = default; + + outcome::result commit() override; + + outcome::result put(const common::BufferView &key, + BufferOrView &&value) override; + + outcome::result remove(const common::BufferView &key) override; + void clear() override; + + private: + std::unique_ptr storage_batch_; + }; + +} // namespace kagome::storage::trie diff --git a/core/storage/trie/impl/trie_storage_backend_impl.cpp b/core/storage/trie/impl/trie_storage_backend_impl.cpp index cd8b26aae9..c8b60e5e73 100644 --- a/core/storage/trie/impl/trie_storage_backend_impl.cpp +++ b/core/storage/trie/impl/trie_storage_backend_impl.cpp @@ 
-9,5 +9,46 @@ #include #include "storage/spaces.hpp" +#include "storage/trie/impl/trie_storage_backend_batch.hpp" -namespace kagome::storage::trie {} // namespace kagome::storage::trie +namespace kagome::storage::trie { + + TrieStorageBackendImpl::TrieStorageBackendImpl( + std::shared_ptr storage) + : storage_{storage->getSpace(Space::kTrieNode)} { + BOOST_ASSERT(storage_ != nullptr); + } + + std::unique_ptr + TrieStorageBackendImpl::cursor() { + return storage_->cursor(); + } + + std::unique_ptr TrieStorageBackendImpl::batch() { + return std::make_unique(storage_->batch()); + } + + outcome::result TrieStorageBackendImpl::get( + const BufferView &key) const { + return storage_->get(key); + } + + outcome::result> TrieStorageBackendImpl::tryGet( + const BufferView &key) const { + return storage_->tryGet(key); + } + + outcome::result TrieStorageBackendImpl::contains( + const BufferView &key) const { + return storage_->contains(key); + } + + outcome::result TrieStorageBackendImpl::put(const BufferView &key, + BufferOrView &&value) { + return storage_->put(key, std::move(value)); + } + + outcome::result TrieStorageBackendImpl::remove(const BufferView &key) { + return storage_->remove(key); + } +} // namespace kagome::storage::trie diff --git a/core/storage/trie/impl/trie_storage_backend_impl.hpp b/core/storage/trie/impl/trie_storage_backend_impl.hpp index a015de06cb..95786ad952 100644 --- a/core/storage/trie/impl/trie_storage_backend_impl.hpp +++ b/core/storage/trie/impl/trie_storage_backend_impl.hpp @@ -15,25 +15,24 @@ namespace kagome::storage::trie { class TrieStorageBackendImpl : public TrieStorageBackend { public: - TrieStorageBackendImpl(std::shared_ptr db) - : db_{std::move(db)} { - BOOST_ASSERT(db_ != nullptr); - } + TrieStorageBackendImpl(std::shared_ptr storage); - BufferStorage &nodes() override { - return *db_->getSpace(Space::kTrieNode); - } + ~TrieStorageBackendImpl() override = default; - BufferStorage &values() override { - return 
*db_->getSpace(Space::kTrieValue); - } + std::unique_ptr cursor() override; + std::unique_ptr batch() override; - std::unique_ptr batch() override { - return db_->createBatch(); - } + outcome::result get(const BufferView &key) const override; + outcome::result> tryGet( + const BufferView &key) const override; + outcome::result contains(const BufferView &key) const override; + + outcome::result put(const BufferView &key, + BufferOrView &&value) override; + outcome::result remove(const common::BufferView &key) override; private: - std::shared_ptr db_; + std::shared_ptr storage_; }; } // namespace kagome::storage::trie diff --git a/core/storage/trie/polkadot_trie/polkadot_trie_cursor.hpp b/core/storage/trie/polkadot_trie/polkadot_trie_cursor.hpp index 173450a830..6f7674caf8 100644 --- a/core/storage/trie/polkadot_trie/polkadot_trie_cursor.hpp +++ b/core/storage/trie/polkadot_trie/polkadot_trie_cursor.hpp @@ -28,14 +28,6 @@ namespace kagome::storage::trie { */ virtual outcome::result seekUpperBound( const common::BufferView &key) = 0; - - // small values (less than hash size) are not hashed and stored as-is inside - // their node - struct ValueHash { - Hash256 hash; - bool small = false; - }; - [[nodiscard]] virtual std::optional valueHash() const = 0; }; } // namespace kagome::storage::trie diff --git a/core/storage/trie/polkadot_trie/polkadot_trie_cursor_impl.cpp b/core/storage/trie/polkadot_trie/polkadot_trie_cursor_impl.cpp index 8aa3ae3675..76fc287a28 100644 --- a/core/storage/trie/polkadot_trie/polkadot_trie_cursor_impl.cpp +++ b/core/storage/trie/polkadot_trie/polkadot_trie_cursor_impl.cpp @@ -8,8 +8,6 @@ #include -#include "common/blob.hpp" -#include "crypto/blake2/blake2b.h" #include "macro/unreachable.hpp" #include "storage/trie/polkadot_trie/polkadot_trie.hpp" #include "storage/trie/polkadot_trie/trie_error.hpp" @@ -353,30 +351,6 @@ namespace kagome::storage::trie { return std::nullopt; } - std::optional - PolkadotTrieCursorImpl::valueHash() const { - if 
(const auto *search_state = std::get_if(&state_); - search_state != nullptr) { - const auto &value_opt = search_state->getCurrent().getValue(); - - if (value_opt.hash) { - return ValueHash{.hash = value_opt.hash.value(), .small = false}; - } - if (value_opt.value) { - if (value_opt.value->size() >= Hash256::size() + 1) { - return ValueHash{.hash = crypto::blake2b<32>(value_opt.value.value()), - .small = false}; - } - Hash256 value_as_hash{}; - std::copy(value_opt.value.value().begin(), - value_opt.value.value().end(), - value_as_hash.begin()); - return ValueHash{.hash = value_as_hash, .small = true}; - } - } - return std::nullopt; - } - auto PolkadotTrieCursorImpl::makeSearchStateAt(const common::BufferView &key) -> outcome::result { if (trie_->getRoot() == nullptr) { diff --git a/core/storage/trie/polkadot_trie/polkadot_trie_cursor_impl.hpp b/core/storage/trie/polkadot_trie/polkadot_trie_cursor_impl.hpp index 12a718cb42..458568da5f 100644 --- a/core/storage/trie/polkadot_trie/polkadot_trie_cursor_impl.hpp +++ b/core/storage/trie/polkadot_trie/polkadot_trie_cursor_impl.hpp @@ -6,11 +6,9 @@ #pragma once -#include "common/blob.hpp" #include "storage/trie/polkadot_trie/polkadot_trie_cursor.hpp" #include "log/logger.hpp" -#include "storage/trie/polkadot_trie/trie_node.hpp" #include "storage/trie/serialization/polkadot_codec.hpp" namespace kagome::storage::trie { @@ -68,8 +66,6 @@ namespace kagome::storage::trie { [[nodiscard]] std::optional value() const override; - [[nodiscard]] std::optional valueHash() const override; - private: outcome::result seekLowerBoundInternal(const TrieNode ¤t, BufferView left_nibbles); diff --git a/core/storage/trie/polkadot_trie/polkadot_trie_impl.hpp b/core/storage/trie/polkadot_trie/polkadot_trie_impl.hpp index 8101eb2df3..98022d3b59 100644 --- a/core/storage/trie/polkadot_trie/polkadot_trie_impl.hpp +++ b/core/storage/trie/polkadot_trie/polkadot_trie_impl.hpp @@ -6,7 +6,6 @@ #pragma once -#include "common/buffer_view.hpp" #include 
"storage/trie/polkadot_trie/polkadot_trie.hpp" #include "log/logger.hpp" diff --git a/core/storage/trie/polkadot_trie/trie_node.hpp b/core/storage/trie/polkadot_trie/trie_node.hpp index b7ca4a4e80..0c3a42aca2 100644 --- a/core/storage/trie/polkadot_trie/trie_node.hpp +++ b/core/storage/trie/polkadot_trie/trie_node.hpp @@ -133,14 +133,10 @@ namespace kagome::storage::trie { bool dirty = true) : hash{hash}, value{std::move(value)}, dirty_{dirty} {} - explicit operator bool() const { + operator bool() const { return is_some(); } - bool operator==(const ValueAndHash &rhs) const { - return std::tie(value, hash) == std::tie(rhs.value, rhs.hash); - } - bool is_none() const { return !is_some(); } diff --git a/core/storage/trie/raw_cursor.hpp b/core/storage/trie/raw_cursor.hpp index 90bdcb63d1..452a3e13dd 100644 --- a/core/storage/trie/raw_cursor.hpp +++ b/core/storage/trie/raw_cursor.hpp @@ -6,23 +6,10 @@ #pragma once -#include #include "storage/trie/child_prefix.hpp" #include "storage/trie/polkadot_trie/trie_node.hpp" namespace kagome::storage::trie { - - enum RawCursorError : uint8_t { - EmptyStack = 1, - ChildBranchNotFound, - StackBackIsNotBranch - }; -} - -OUTCOME_HPP_DECLARE_ERROR(kagome::storage::trie, RawCursorError); - -namespace kagome::storage::trie { - template struct RawCursor { struct Item { @@ -32,7 +19,7 @@ namespace kagome::storage::trie { T t; }; - outcome::result update() { + void update() { child = false; branch_merkle = nullptr; branch_hash.reset(); @@ -40,7 +27,7 @@ namespace kagome::storage::trie { value_hash = nullptr; value_child.reset(); if (stack.empty()) { - return outcome::success(); + return; } auto &item = stack.back(); child = item.child; @@ -53,7 +40,7 @@ namespace kagome::storage::trie { // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-constant-array-index) auto &branch = branches[i]; if (not branch) { - return RawCursorError::ChildBranchNotFound; + throw std::logic_error{"RawCursor::update branches[branch]=null"}; } branch_merkle = 
&dynamic_cast(*branch).db_key; @@ -69,35 +56,33 @@ namespace kagome::storage::trie { value_child = common::Hash256::fromSpan(*value.value).value(); } } - return outcome::success(); } - outcome::result push(Item &&item) { + void push(Item &&item) { if (not stack.empty() and not stack.back().branch) { - return RawCursorError::StackBackIsNotBranch; + throw std::logic_error{"RawCursor::push branch=None"}; } item.child.match(item.node->getKeyNibbles()); stack.emplace_back(std::move(item)); if (stack.back().branch) { - OUTCOME_TRY(branchInit()); + branchInit(); } else { - OUTCOME_TRY(update()); + update(); } - return outcome::success(); } - outcome::result pop() { + void pop() { stack.pop_back(); - return update(); + update(); } - outcome::result branchInit() { - return branchNext(false); + void branchInit() { + branchNext(false); } - outcome::result branchNext(bool next = true) { + void branchNext(bool next = true) { if (stack.empty()) { - return RawCursorError::EmptyStack; + throw std::logic_error{"RawCursor::branchNext top=null"}; } auto &item = stack.back(); auto &i = item.branch; @@ -118,7 +103,7 @@ namespace kagome::storage::trie { next = false; } } - return update(); + update(); } std::vector stack; diff --git a/core/storage/trie/serialization/trie_serializer_impl.cpp b/core/storage/trie/serialization/trie_serializer_impl.cpp index 23285b50ed..2faa0cac32 100644 --- a/core/storage/trie/serialization/trie_serializer_impl.cpp +++ b/core/storage/trie/serialization/trie_serializer_impl.cpp @@ -17,13 +17,13 @@ namespace kagome::storage::trie { TrieSerializerImpl::TrieSerializerImpl( std::shared_ptr factory, std::shared_ptr codec, - std::shared_ptr storage_backend) + std::shared_ptr node_backend) : trie_factory_{std::move(factory)}, codec_{std::move(codec)}, - storage_backend_{std::move(storage_backend)} { + node_backend_{std::move(node_backend)} { BOOST_ASSERT(trie_factory_ != nullptr); BOOST_ASSERT(codec_ != nullptr); - BOOST_ASSERT(storage_backend_ != nullptr); + 
BOOST_ASSERT(node_backend_ != nullptr); } RootHash TrieSerializerImpl::getEmptyRootHash() const { @@ -65,31 +65,32 @@ namespace kagome::storage::trie { outcome::result TrieSerializerImpl::storeRootNode( TrieNode &node, StateVersion version) { - auto batch = storage_backend_->batch(); + auto batch = node_backend_->batch(); BOOST_ASSERT(batch != nullptr); - auto visitor = [&](Codec::Visitee visitee) -> outcome::result { - if (auto child_data = std::get_if(&visitee); - child_data != nullptr) { - if (child_data->merkle_value.isHash()) { - return batch->put(Space::kTrieNode, - child_data->merkle_value.asBuffer(), - std::move(child_data->encoding)); - } - return outcome::success(); // nodes which encoding is shorter - // than its hash are not stored in - // the DB separately - } - auto value_data = std::get(visitee); - // value_data.value is a reference to a buffer stored outside of - // this lambda, so taking its view should be okay - return batch->put( - Space::kTrieValue, value_data.hash, value_data.value.view()); - }; - - OUTCOME_TRY(enc, codec_->encodeNode(node, version, visitor)); + OUTCOME_TRY( + enc, + codec_->encodeNode( + node, + version, + [&](Codec::Visitee visitee) -> outcome::result { + if (auto child_data = std::get_if(&visitee); + child_data != nullptr) { + if (child_data->merkle_value.isHash()) { + return batch->put(child_data->merkle_value.asBuffer(), + std::move(child_data->encoding)); + } + return outcome::success(); // nodes which encoding is shorter + // than its hash are not stored in + // the DB separately + } + auto value_data = std::get(visitee); + // value_data.value is a reference to a buffer stored outside of + // this lambda, so taking its view should be okay + return batch->put(value_data.hash, value_data.value.view()); + })); auto hash = codec_->hash256(enc); - OUTCOME_TRY(batch->put(Space::kTrieNode, hash, std::move(enc))); + OUTCOME_TRY(batch->put(hash, std::move(enc))); OUTCOME_TRY(batch->commit()); return hash; @@ -112,7 +113,7 @@ 
namespace kagome::storage::trie { } BufferOrView enc; if (auto hash = db_key.asHash()) { - BOOST_OUTCOME_TRY(enc, storage_backend_->nodes().get(*hash)); + BOOST_OUTCOME_TRY(enc, node_backend_->get(*hash)); if (on_node_loaded) { on_node_loaded(*hash, enc); } @@ -128,7 +129,7 @@ namespace kagome::storage::trie { outcome::result> TrieSerializerImpl::retrieveValue(const common::Hash256 &hash, const OnNodeLoaded &on_node_loaded) const { - OUTCOME_TRY(value, storage_backend_->values().tryGet(hash)); + OUTCOME_TRY(value, node_backend_->tryGet(hash)); return common::map_optional(std::move(value), [&](common::BufferOrView &&value) { if (on_node_loaded) { diff --git a/core/storage/trie/serialization/trie_serializer_impl.hpp b/core/storage/trie/serialization/trie_serializer_impl.hpp index fd422a8250..3411c8fea7 100644 --- a/core/storage/trie/serialization/trie_serializer_impl.hpp +++ b/core/storage/trie/serialization/trie_serializer_impl.hpp @@ -24,7 +24,7 @@ namespace kagome::storage::trie { public: TrieSerializerImpl(std::shared_ptr factory, std::shared_ptr codec, - std::shared_ptr storage_backend); + std::shared_ptr node_backend); ~TrieSerializerImpl() override = default; RootHash getEmptyRootHash() const override; @@ -66,6 +66,6 @@ namespace kagome::storage::trie { std::shared_ptr trie_factory_; std::shared_ptr codec_; - std::shared_ptr storage_backend_; + std::shared_ptr node_backend_; }; } // namespace kagome::storage::trie diff --git a/core/storage/trie/trie_batches.hpp b/core/storage/trie/trie_batches.hpp index b7ecb7e780..6c6b177066 100644 --- a/core/storage/trie/trie_batches.hpp +++ b/core/storage/trie/trie_batches.hpp @@ -6,9 +6,7 @@ #pragma once -#include "common/buffer.hpp" #include "storage/buffer_map_types.hpp" -#include "storage/face/generic_maps.hpp" #include "storage/trie/polkadot_trie/polkadot_trie_cursor.hpp" #include "storage/trie/types.hpp" diff --git a/core/storage/trie/trie_storage_backend.hpp b/core/storage/trie/trie_storage_backend.hpp index 
aed7276407..2a893ba1b8 100644 --- a/core/storage/trie/trie_storage_backend.hpp +++ b/core/storage/trie/trie_storage_backend.hpp @@ -15,15 +15,12 @@ namespace kagome::storage::trie { /** - * Provides storage for trie nodes and values. + * Adapter for the trie node storage that allows to hide keyspace separation + * along with root hash storing logic from the trie db component */ - class TrieStorageBackend { + class TrieStorageBackend : public BufferStorage { public: - virtual ~TrieStorageBackend() = default; - - virtual BufferStorage &nodes() = 0; - virtual BufferStorage &values() = 0; - virtual std::unique_ptr batch() = 0; + ~TrieStorageBackend() override = default; }; } // namespace kagome::storage::trie diff --git a/core/storage/trie_pruner/impl/trie_pruner_impl.cpp b/core/storage/trie_pruner/impl/trie_pruner_impl.cpp index 34ba6e54f3..ea3da3d5e8 100644 --- a/core/storage/trie_pruner/impl/trie_pruner_impl.cpp +++ b/core/storage/trie_pruner/impl/trie_pruner_impl.cpp @@ -15,7 +15,6 @@ #include "application/app_state_manager.hpp" #include "blockchain/block_tree.hpp" #include "crypto/hasher/hasher_impl.hpp" -#include "storage/buffer_map_types.hpp" #include "storage/database_error.hpp" #include "storage/predefined_keys.hpp" #include "storage/spaced_storage.hpp" @@ -60,21 +59,20 @@ namespace kagome::storage::trie_pruner { TriePrunerImpl::TriePrunerImpl( std::shared_ptr app_state_manager, + std::shared_ptr node_storage, std::shared_ptr serializer, std::shared_ptr codec, std::shared_ptr storage, std::shared_ptr hasher, std::shared_ptr config) - : serializer_{std::move(serializer)}, + : node_storage_{std::move(node_storage)}, + serializer_{std::move(serializer)}, codec_{std::move(codec)}, storage_{std::move(storage)}, - node_storage_{storage_->getSpace(Space::kTrieNode)}, - value_storage_{storage_->getSpace(Space::kTrieValue)}, hasher_{std::move(hasher)}, pruning_depth_{config->statePruningDepth()}, thorough_pruning_{config->enableThoroughPruning()} { 
BOOST_ASSERT(node_storage_ != nullptr); - BOOST_ASSERT(value_storage_ != nullptr); BOOST_ASSERT(serializer_ != nullptr); BOOST_ASSERT(codec_ != nullptr); BOOST_ASSERT(storage_ != nullptr); @@ -176,9 +174,9 @@ namespace kagome::storage::trie_pruner { outcome::result TriePrunerImpl::pruneFinalized( const primitives::BlockHeader &block) { std::unique_lock lock{mutex_}; - auto batch = storage_->createBatch(); - OUTCOME_TRY(prune(*batch, block.state_root)); - OUTCOME_TRY(batch->commit()); + auto node_batch = node_storage_->batch(); + OUTCOME_TRY(prune(*node_batch, block.state_root)); + OUTCOME_TRY(node_batch->commit()); last_pruned_block_ = block.blockInfo(); OUTCOME_TRY(savePersistentState()); @@ -189,13 +187,15 @@ namespace kagome::storage::trie_pruner { const primitives::BlockHeader &block) { std::unique_lock lock{mutex_}; // should prune even when pruning depth is none - auto batch = storage_->createBatch(); - OUTCOME_TRY(prune(*batch, block.state_root)); - OUTCOME_TRY(batch->commit()); + auto node_batch = node_storage_->batch(); + auto value_batch = node_storage_->batch(); + OUTCOME_TRY(prune(*node_batch, block.state_root)); + OUTCOME_TRY(node_batch->commit()); + OUTCOME_TRY(value_batch->commit()); return outcome::success(); } - outcome::result TriePrunerImpl::prune(BufferSpacedBatch &batch, + outcome::result TriePrunerImpl::prune(BufferBatch &node_batch, const trie::RootHash &root_hash) { auto trie_res = serializer_->retrieveTrie(root_hash, nullptr); if (trie_res.has_error() @@ -216,9 +216,9 @@ namespace kagome::storage::trie_pruner { OUTCOME_TRY( forEachChildTrie(*trie, - [this, &batch](common::BufferView child_key, - const trie::RootHash &child_hash) { - return prune(batch, child_hash); + [this, &node_batch](common::BufferView child_key, + const trie::RootHash &child_hash) { + return prune(node_batch, child_hash); })); size_t nodes_removed = 0; @@ -268,7 +268,7 @@ namespace kagome::storage::trie_pruner { && ref_count == 0) { nodes_removed++; 
ref_count_.erase(ref_count_it); - OUTCOME_TRY(batch.remove(Space::kTrieNode, hash)); + OUTCOME_TRY(node_batch.remove(hash)); auto hash_opt = node->getValue().hash; if (hash_opt.has_value()) { auto &value_hash = *hash_opt; @@ -279,7 +279,7 @@ namespace kagome::storage::trie_pruner { auto &value_ref_count = value_ref_it->second; value_ref_count--; if (value_ref_count == 0) { - OUTCOME_TRY(batch.remove(Space::kTrieValue, value_hash)); + OUTCOME_TRY(node_batch.remove(value_hash)); value_ref_count_.erase(value_ref_it); values_removed++; } @@ -407,9 +407,7 @@ namespace kagome::storage::trie_pruner { auto value_hash_opt = encoder.getValueHash(*node, version); if (value_hash_opt) { auto &value_ref_count = value_ref_count_[*value_hash_opt]; - OUTCOME_TRY( - contains_value, - storage_->getSpace(Space::kTrieValue)->contains(*value_hash_opt)); + OUTCOME_TRY(contains_value, node_storage_->contains(*value_hash_opt)); if (value_ref_count == 0 && contains_value && !thorough_pruning_) { value_ref_count++; } diff --git a/core/storage/trie_pruner/impl/trie_pruner_impl.hpp b/core/storage/trie_pruner/impl/trie_pruner_impl.hpp index 9e5e6c5a8d..d52ddb9887 100644 --- a/core/storage/trie_pruner/impl/trie_pruner_impl.hpp +++ b/core/storage/trie_pruner/impl/trie_pruner_impl.hpp @@ -62,6 +62,7 @@ namespace kagome::storage::trie_pruner { TriePrunerImpl( std::shared_ptr app_state_manager, + std::shared_ptr node_storage, std::shared_ptr serializer, std::shared_ptr codec, std::shared_ptr storage, @@ -118,7 +119,7 @@ namespace kagome::storage::trie_pruner { const primitives::BlockHeader &last_pruned_block, const blockchain::BlockTree &block_tree); - outcome::result prune(BufferSpacedBatch &batch, + outcome::result prune(BufferBatch &node_batch, const storage::trie::RootHash &state); outcome::result addNewStateWith( @@ -133,11 +134,10 @@ namespace kagome::storage::trie_pruner { std::unordered_set immortal_nodes_; std::optional last_pruned_block_; + std::shared_ptr node_storage_; std::shared_ptr 
serializer_; std::shared_ptr codec_; std::shared_ptr storage_; - std::shared_ptr node_storage_; - std::shared_ptr value_storage_; std::shared_ptr hasher_; const std::optional pruning_depth_{}; diff --git a/core/utils/kagome_db_editor.cpp b/core/utils/kagome_db_editor.cpp index 45cab3a126..bc0c65fc06 100644 --- a/core/utils/kagome_db_editor.cpp +++ b/core/utils/kagome_db_editor.cpp @@ -4,8 +4,6 @@ * SPDX-License-Identifier: Apache-2.0 */ -#include "storage/buffer_map_types.hpp" -#include "storage/trie/trie_storage_backend.hpp" #if defined(BACKWARD_HAS_BACKTRACE) #include #endif @@ -17,7 +15,6 @@ #include #include "blockchain/block_storage_error.hpp" -#include "blockchain/impl/block_header_repository_impl.hpp" #include "blockchain/impl/block_storage_impl.hpp" #include "blockchain/impl/block_tree_impl.hpp" #include "blockchain/impl/storage_util.hpp" @@ -26,7 +23,6 @@ #include "crypto/hasher/hasher_impl.hpp" #include "network/impl/extrinsic_observer_impl.hpp" #include "runtime/common/runtime_upgrade_tracker_impl.hpp" -#include "storage/face/map_cursor.hpp" #include "storage/predefined_keys.hpp" #include "storage/rocksdb/rocksdb.hpp" #include "storage/trie/impl/trie_storage_backend_impl.hpp" @@ -45,16 +41,20 @@ using common::BufferView; // NOLINTBEGIN(cppcoreguidelines-pro-bounds-pointer-arithmetic) -struct TrieTracker : storage::BufferStorage { - TrieTracker(storage::BufferStorage &inner) : inner{inner} {} +struct TrieTracker : TrieStorageBackend { + TrieTracker(std::shared_ptr inner) + : inner{std::move(inner)} {} std::unique_ptr cursor() override { abort(); } + std::unique_ptr batch() override { + abort(); + } outcome::result get(const BufferView &key) const override { track(key); - return inner.get(key); + return inner->get(key); } outcome::result> tryGet( const BufferView &key) const override { @@ -68,7 +68,6 @@ struct TrieTracker : storage::BufferStorage { BufferOrView &&value) override { abort(); } - outcome::result remove(const common::BufferView &key) override 
{ abort(); } @@ -80,31 +79,10 @@ struct TrieTracker : storage::BufferStorage { return keys.contains(common::Hash256::fromSpan(key).value()); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) - storage::BufferStorage &inner; + std::shared_ptr inner; mutable std::set keys; }; -struct TrieTrackerBackend : TrieStorageBackend { - TrieTrackerBackend(std::shared_ptr backend) - : backend{std::move(backend)}, node_tracker{backend->nodes()} {} - - storage::BufferStorage &nodes() override { - return node_tracker; - } - - storage::BufferStorage &values() override { - return backend->values(); - } - - std::unique_ptr batch() override { - return backend->batch(); - } - - std::shared_ptr backend; - TrieTracker node_tracker; -}; - template using sptr = std::shared_ptr; @@ -259,11 +237,11 @@ int db_editor_main(int argc, const char **argv) { auto factory = std::make_shared(); std::shared_ptr storage; - std::shared_ptr buffer_storage; + std::shared_ptr buffer_storage; try { storage = storage::RocksDb::create(argv[DB_PATH], rocksdb::Options()).value(); - storage->dropColumn(storage::Space::kBlockBody).value(); + storage->dropColumn(storage::Space::kBlockBody); buffer_storage = storage->getSpace(storage::Space::kDefault); } catch (std::system_error &e) { log->error("{}", e.what()); @@ -271,7 +249,7 @@ int db_editor_main(int argc, const char **argv) { return 0; } - auto trie_node_tracker = std::make_shared( + auto trie_node_tracker = std::make_shared( std::make_shared(storage)); auto injector = di::make_injector( @@ -289,7 +267,7 @@ int db_editor_main(int argc, const char **argv) { }), di::bind.to(factory), di::bind.template to(), - di::bind.template to(), + di::bind.template to(), di::bind.template to()); auto hasher = injector.template create>(); @@ -311,9 +289,8 @@ int db_editor_main(int argc, const char **argv) { primitives::BlockInfo best_leaf( std::numeric_limits::min(), {}); for (auto hash : block_tree_leaf_hashes) { - auto number = 
check(check(block_storage->getBlockHeader(hash)).value()) - .value() - .number; + auto number = + check(check(block_storage->getBlockHeader(hash))).value().number; const auto &leaf = *leafs.emplace(number, hash).first; SL_TRACE(log, "Leaf {} found", leaf); if (leaf.number <= least_leaf.number) { @@ -338,8 +315,7 @@ int db_editor_main(int argc, const char **argv) { auto &block = node.value(); auto header = - check(check(block_storage->getBlockHeader(block.hash)).value()) - .value(); + check(check(block_storage->getBlockHeader(block.hash))).value(); if (header.number == 0) { last_finalized_block = block; last_finalized_block_header = header; @@ -418,7 +394,6 @@ int db_editor_main(int argc, const char **argv) { } auto trie_node_storage = storage->getSpace(storage::Space::kTrieNode); - auto trie_value_storage = storage->getSpace(storage::Space::kTrieValue); auto track_trie_entries = [&log, &buffer_storage, &prefix](auto storage, auto tracker) { @@ -430,7 +405,7 @@ int db_editor_main(int argc, const char **argv) { TicToc t2("Process DB.", log); while (db_cursor->isValid() && db_cursor->key().has_value()) { auto key = db_cursor->key().value(); - if (tracker->node_tracker.tracked(key)) { + if (tracker->tracked(key)) { db_cursor->next().value(); continue; } diff --git a/core/utils/sptr.hpp b/core/utils/sptr.hpp new file mode 100644 index 0000000000..d793e1c81b --- /dev/null +++ b/core/utils/sptr.hpp @@ -0,0 +1,16 @@ +/** + * Copyright Quadrivium LLC + * All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include + +namespace kagome { + template + auto toSptr(T &&t) { + return std::make_shared>(std::forward(t)); + } +} // namespace kagome diff --git a/core/utils/storage_explorer.cpp b/core/utils/storage_explorer.cpp index c9941a7e02..a36bedfeff 100644 --- a/core/utils/storage_explorer.cpp +++ b/core/utils/storage_explorer.cpp @@ -203,14 +203,11 @@ class InspectBlockCommand : public Command { } const auto &hash = hash_opt_res.value().value(); 
- auto header_opt_res = block_storage->getBlockHeader(hash); - if (header_opt_res.has_error()) { - throwError("Internal error: {}}", header_opt_res.error()); + auto header_res = block_storage->getBlockHeader(hash); + if (header_res.has_error()) { + throwError("Internal error: {}}", header_res.error()); } - if (header_opt_res.value().has_value()) { - throwError("Block header not found for '{}'", args[1]); - } - const auto &header = header_opt_res.value().value(); + const auto &header = header_res.value(); std::cout << "#: " << header.number << "\n"; std::cout << "Parent hash: " << header.parent_hash.toHex() << "\n"; @@ -376,12 +373,8 @@ class SearchChainCommand : public Command { } const auto &hash = hash_opt_res.value().value(); - auto start_header_opt = unwrapResult("Getting 'start' block header", - block_storage->getBlockHeader(hash)); - if (!start_header_opt) { - throwError("Start block header {} not found", start); - } - auto &start_header = start_header_opt.value(); + auto start_header = unwrapResult("Getting 'start' block header", + block_storage->getBlockHeader(hash)); auto end_hash_opt = unwrapResult("Getting 'end' block header", block_storage->getBlockHash(end)); @@ -390,12 +383,8 @@ class SearchChainCommand : public Command { } const auto &end_hash = end_hash_opt.value(); - auto end_header_opt = unwrapResult("Getting 'end' block header", - block_storage->getBlockHeader(end_hash)); - if (!end_header_opt) { - throwError("'End block header {} not found", end); - } - auto &end_header = end_header_opt.value(); + auto end_header = unwrapResult("Getting 'end' block header", + block_storage->getBlockHeader(end_hash)); for (int64_t current = start_header.number, stop = end_header.number; current <= stop; @@ -408,13 +397,10 @@ class SearchChainCommand : public Command { } const auto ¤t_hash = current_hash_opt.value(); - auto current_header_opt = + auto current_header = unwrapResult(fmt::format("Getting header of block #{}", current), 
block_storage->getBlockHeader(current_hash)); - if (!current_header_opt) { - throwError("Block header #{} not found", current); - } - searchBlock(out, current_header_opt.value(), target); + searchBlock(out, current_header, target); } } @@ -630,7 +616,7 @@ int storage_explorer_main(int argc, const char **argv) { auto configuration = std::make_shared(); - size_t kagome_args_start; // NOLINT(cppcoreguidelines-init-variables) + size_t kagome_args_start{}; bool is_found = false; for (size_t i = 1; i < args.size(); i++) { if (strcmp(args[i], "--") == 0) { diff --git a/test/core/api/service/chain/chain_api_test.cpp b/test/core/api/service/chain/chain_api_test.cpp index bb080bc47d..9801ff2dc0 100644 --- a/test/core/api/service/chain/chain_api_test.cpp +++ b/test/core/api/service/chain/chain_api_test.cpp @@ -11,7 +11,6 @@ #include "api/service/chain/requests/subscribe_finalized_heads.hpp" #include "mock/core/api/service/api_service_mock.hpp" #include "mock/core/api/service/chain/chain_api_mock.hpp" -#include "mock/core/blockchain/block_header_repository_mock.hpp" #include "mock/core/blockchain/block_storage_mock.hpp" #include "mock/core/blockchain/block_tree_mock.hpp" #include "primitives/block.hpp" @@ -28,7 +27,6 @@ using kagome::api::ChainApi; using kagome::api::ChainApiImpl; using kagome::api::ChainApiMock; using kagome::api::chain::request::SubscribeFinalizedHeads; -using kagome::blockchain::BlockHeaderRepositoryMock; using kagome::blockchain::BlockStorageMock; using kagome::blockchain::BlockTreeMock; using kagome::common::Buffer; @@ -44,13 +42,11 @@ using testing::Return; struct ChainApiTest : public ::testing::Test { void SetUp() override { - header_repo = std::make_shared(); block_tree = std::make_shared(); block_storage = std::make_shared(); api_service = std::make_shared(); api = std::make_shared( - header_repo, block_tree, block_storage, testutil::sptr_to_lazy(api_service)); @@ -62,7 +58,6 @@ struct ChainApiTest : public ::testing::Test { 
"0f82403bcd4f7d4d23ce04775d112cd5dede13633924de6cb048d2676e322950"_hash256; } - std::shared_ptr header_repo; std::shared_ptr block_tree; std::shared_ptr api_service; std::shared_ptr api; @@ -108,7 +103,7 @@ TEST_F(ChainApiTest, GetBlockHashNoParam) { */ TEST_F(ChainApiTest, GetBlockHashByNumber) { // kagome::primitives::BlockId did = "D"_hash256; - EXPECT_CALL(*header_repo, getHashByNumber(42)) + EXPECT_CALL(*block_tree, getHashByNumber(42)) .WillOnce(Return("CDE"_hash256)); EXPECT_OUTCOME_TRUE(r, api->getBlockHash(42)); @@ -121,7 +116,7 @@ TEST_F(ChainApiTest, GetBlockHashByNumber) { * @then the correct hash value is returned */ TEST_F(ChainApiTest, GetBlockHashByHexNumber) { - EXPECT_CALL(*header_repo, getHashByNumber(42)) + EXPECT_CALL(*block_tree, getHashByNumber(42)) .WillOnce(Return("CDE"_hash256)); EXPECT_OUTCOME_TRUE(r, api->getBlockHash("0x2a")); @@ -134,9 +129,9 @@ TEST_F(ChainApiTest, GetBlockHashByHexNumber) { * @then the correct vector of hash values is returned */ TEST_F(ChainApiTest, GetBlockHashArray) { - EXPECT_CALL(*header_repo, getHashByNumber(50)).WillOnce(Return(hash1)); - EXPECT_CALL(*header_repo, getHashByNumber(100)).WillOnce(Return(hash2)); - EXPECT_CALL(*header_repo, getHashByNumber(200)).WillOnce(Return(hash3)); + EXPECT_CALL(*block_tree, getHashByNumber(50)).WillOnce(Return(hash1)); + EXPECT_CALL(*block_tree, getHashByNumber(100)).WillOnce(Return(hash2)); + EXPECT_CALL(*block_tree, getHashByNumber(200)).WillOnce(Return(hash3)); std::vector> request_data = { 50, "0x64", 200}; EXPECT_OUTCOME_TRUE( @@ -153,7 +148,7 @@ TEST_F(ChainApiTest, GetBlockHashArray) { */ TEST_F(ChainApiTest, GetHeader) { BlockHash a = hash1; - EXPECT_CALL(*header_repo, getBlockHeader(a)).WillOnce(Return(*data.header)); + EXPECT_CALL(*block_tree, getBlockHeader(a)).WillOnce(Return(*data.header)); EXPECT_OUTCOME_TRUE(r, api->getHeader(std::string("0x") + hash1.toHex())); ASSERT_EQ(r, *data.header); @@ -169,7 +164,7 @@ TEST_F(ChainApiTest, GetHeaderLats) { 
EXPECT_CALL(*block_tree, getLastFinalized()) .WillOnce(Return(BlockInfo(42, hash1))); - EXPECT_CALL(*header_repo, getBlockHeader(a)).WillOnce(Return(*data.header)); + EXPECT_CALL(*block_tree, getBlockHeader(a)).WillOnce(Return(*data.header)); EXPECT_OUTCOME_TRUE(r, api->getHeader()); ASSERT_EQ(r, *data.header); diff --git a/test/core/api/service/child_state/child_state_api_test.cpp b/test/core/api/service/child_state/child_state_api_test.cpp index 400929040d..a6d598b3d4 100644 --- a/test/core/api/service/child_state/child_state_api_test.cpp +++ b/test/core/api/service/child_state/child_state_api_test.cpp @@ -9,7 +9,6 @@ #include "api/service/child_state/impl/child_state_api_impl.hpp" #include "core/storage/trie/polkadot_trie_cursor_dummy.hpp" #include "mock/core/api/service/child_state/child_state_api_mock.hpp" -#include "mock/core/blockchain/block_header_repository_mock.hpp" #include "mock/core/blockchain/block_tree_mock.hpp" #include "mock/core/runtime/core_mock.hpp" #include "mock/core/runtime/metadata_mock.hpp" @@ -22,7 +21,6 @@ #include "testutil/outcome.hpp" using kagome::api::ChildStateApiMock; -using kagome::blockchain::BlockHeaderRepositoryMock; using kagome::blockchain::BlockTreeMock; using kagome::common::Buffer; using kagome::primitives::BlockHash; @@ -44,14 +42,12 @@ namespace kagome::api { public: void SetUp() override { api_ = std::make_unique( - block_header_repo_, storage_, block_tree_, runtime_core_, metadata_); + storage_, block_tree_, runtime_core_, metadata_); } protected: std::shared_ptr storage_ = std::make_shared(); - std::shared_ptr block_header_repo_ = - std::make_shared(); std::shared_ptr block_tree_ = std::make_shared(); std::shared_ptr runtime_core_ = std::make_shared(); @@ -80,7 +76,7 @@ namespace kagome::api { EXPECT_CALL(*block_tree_, getLastFinalized()) .WillOnce(testing::Return(BlockInfo(42, "D"_hash256))); - EXPECT_CALL(*block_header_repo_, getBlockHeader("D"_hash256)) + EXPECT_CALL(*block_tree_, getBlockHeader("D"_hash256)) 
.WillOnce(testing::Return(makeBlockHeaderOfStateRoot("CDE"_hash256))); EXPECT_CALL(*storage_, getEphemeralBatchAt("CDE"_hash256)) .WillOnce(testing::Invoke([](auto &root) { @@ -107,7 +103,7 @@ namespace kagome::api { } TEST_F(ChildStateApiTest, GetStorageAt) { - EXPECT_CALL(*block_header_repo_, getBlockHeader("B"_hash256)) + EXPECT_CALL(*block_tree_, getBlockHeader("B"_hash256)) .WillOnce(testing::Return(makeBlockHeaderOfStateRoot("ABC"_hash256))); EXPECT_CALL(*storage_, getEphemeralBatchAt("ABC"_hash256)) .WillOnce(testing::Invoke([](auto &root) { @@ -151,7 +147,7 @@ namespace kagome::api { EXPECT_CALL(*block_tree_, getLastFinalized()) .WillOnce(Return(BlockInfo(10, block_hash))); - EXPECT_CALL(*block_header_repo_, getBlockHeader(block_hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block_hash)) .WillOnce(Return(makeBlockHeaderOfStateRoot("6789"_hash256))); EXPECT_CALL(*storage_, getEphemeralBatchAt("6789"_hash256)) .WillOnce(testing::Invoke([&](auto &root) { @@ -206,7 +202,7 @@ namespace kagome::api { EXPECT_CALL(*block_tree_, getLastFinalized()) .WillOnce(Return(BlockInfo{10, block_hash})); - EXPECT_CALL(*block_header_repo_, getBlockHeader(block_hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block_hash)) .WillOnce(Return(makeBlockHeaderOfStateRoot("6789"_hash256))); EXPECT_CALL(*storage_, getEphemeralBatchAt("6789"_hash256)) .WillOnce(testing::Invoke([&](auto &root) { @@ -258,7 +254,7 @@ namespace kagome::api { auto block_hash_opt = std::make_optional(block_hash); auto expected_result = "3030"_buf; - EXPECT_CALL(*block_header_repo_, getBlockHeader(block_hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block_hash)) .WillOnce(Return(makeBlockHeaderOfStateRoot("6789"_hash256))); auto batch = std::make_unique(); EXPECT_CALL(*storage_, getEphemeralBatchAt("6789"_hash256)) diff --git a/test/core/api/service/state/state_api_test.cpp b/test/core/api/service/state/state_api_test.cpp index 1acb19b02d..68a29aed47 100644 --- 
a/test/core/api/service/state/state_api_test.cpp +++ b/test/core/api/service/state/state_api_test.cpp @@ -13,7 +13,6 @@ #include "core/storage/trie/polkadot_trie_cursor_dummy.hpp" #include "mock/core/api/service/api_service_mock.hpp" #include "mock/core/api/service/state/state_api_mock.hpp" -#include "mock/core/blockchain/block_header_repository_mock.hpp" #include "mock/core/blockchain/block_tree_mock.hpp" #include "mock/core/runtime/core_mock.hpp" #include "mock/core/runtime/metadata_mock.hpp" @@ -29,7 +28,6 @@ using kagome::api::ApiServiceMock; using kagome::api::StateApiMock; -using kagome::blockchain::BlockHeaderRepositoryMock; using kagome::blockchain::BlockTreeMock; using kagome::common::Buffer; using kagome::primitives::BlockHash; @@ -63,7 +61,6 @@ namespace kagome::api { executor_ = std::make_shared( std::make_shared()); api_ = std::make_unique( - block_header_repo_, storage_, block_tree_, runtime_core_, @@ -75,8 +72,6 @@ namespace kagome::api { protected: std::shared_ptr storage_ = std::make_shared(); - std::shared_ptr block_header_repo_ = - std::make_shared(); std::shared_ptr block_tree_ = std::make_shared(); std::shared_ptr runtime_core_ = std::make_shared(); @@ -96,7 +91,7 @@ namespace kagome::api { TEST_F(StateApiTest, GetStorage) { EXPECT_CALL(*block_tree_, getLastFinalized()) .WillOnce(testing::Return(BlockInfo(42, "D"_hash256))); - EXPECT_CALL(*block_header_repo_, getBlockHeader("D"_hash256)) + EXPECT_CALL(*block_tree_, getBlockHeader("D"_hash256)) .WillOnce(testing::Return(makeBlockHeaderOfStateRoot("CDE"_hash256))); auto in_buf = "a"_buf; auto out_buf = "1"_buf; @@ -112,7 +107,7 @@ namespace kagome::api { EXPECT_OUTCOME_TRUE(r, api_->getStorage(key.view())) ASSERT_EQ(r.value(), "1"_buf); - EXPECT_CALL(*block_header_repo_, getBlockHeader("B"_hash256)) + EXPECT_CALL(*block_tree_, getBlockHeader("B"_hash256)) .WillOnce(testing::Return(makeBlockHeaderOfStateRoot("ABC"_hash256))); EXPECT_OUTCOME_TRUE(r1, api_->getStorageAt(key.view(), "B"_hash256)); @@ 
-123,7 +118,6 @@ namespace kagome::api { public: void SetUp() override { auto storage = std::make_shared(); - block_header_repo_ = std::make_shared(); block_tree_ = std::make_shared(); api_service_ = std::make_shared(); @@ -133,7 +127,6 @@ namespace kagome::api { std::make_shared()); api_ = std::make_shared( - block_header_repo_, storage, block_tree_, runtime_core, @@ -144,7 +137,7 @@ namespace kagome::api { EXPECT_CALL(*block_tree_, getLastFinalized()) .WillOnce(testing::Return(BlockInfo(42, "D"_hash256))); - EXPECT_CALL(*block_header_repo_, getBlockHeader("D"_hash256)) + EXPECT_CALL(*block_tree_, getBlockHeader("D"_hash256)) .WillOnce(testing::Return(makeBlockHeaderOfStateRoot("CDE"_hash256))); EXPECT_CALL(*storage, getEphemeralBatchAt(_)) @@ -160,7 +153,6 @@ namespace kagome::api { } protected: - std::shared_ptr block_header_repo_; std::shared_ptr block_tree_; std::shared_ptr api_service_; @@ -387,9 +379,9 @@ namespace kagome::api { std::vector block_range{from, "block2"_hash256, "block3"_hash256, to}; EXPECT_CALL(*block_tree_, getChainByBlocks(from, to)) .WillOnce(testing::Return(block_range)); - EXPECT_CALL(*block_header_repo_, getNumberByHash(from)) + EXPECT_CALL(*block_tree_, getNumberByHash(from)) .WillOnce(testing::Return(1)); - EXPECT_CALL(*block_header_repo_, getNumberByHash(to)) + EXPECT_CALL(*block_tree_, getNumberByHash(to)) .WillOnce(testing::Return(4)); for (auto &block_hash : block_range) { primitives::BlockHash state_root; @@ -397,7 +389,7 @@ namespace kagome::api { std::copy_if(s.begin(), s.end(), state_root.begin(), [](auto b) { return b != 0; }); - EXPECT_CALL(*block_header_repo_, getBlockHeader(block_hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block_hash)) .WillOnce(testing::Return(makeBlockHeaderOfStateRoot(state_root))); EXPECT_CALL(*storage_, getEphemeralBatchAt(state_root)) .WillOnce(testing::Invoke([&keys](auto &root) { @@ -431,9 +423,9 @@ namespace kagome::api { */ TEST_F(StateApiTest, HitsBlockRangeLimits) { primitives::BlockHash 
from{"from"_hash256}, to{"to"_hash256}; - EXPECT_CALL(*block_header_repo_, getNumberByHash(from)) + EXPECT_CALL(*block_tree_, getNumberByHash(from)) .WillOnce(Return(42)); - EXPECT_CALL(*block_header_repo_, getNumberByHash(to)) + EXPECT_CALL(*block_tree_, getNumberByHash(to)) .WillOnce(Return(42 + StateApiImpl::kMaxBlockRange + 1)); EXPECT_OUTCOME_FALSE( error, api_->queryStorage(std::vector({"some_key"_buf}), from, to)); @@ -467,7 +459,7 @@ namespace kagome::api { .WillOnce(testing::Return(block_range)); primitives::BlockHash state_root = "at_state"_hash256; - EXPECT_CALL(*block_header_repo_, getBlockHeader(at)) + EXPECT_CALL(*block_tree_, getBlockHeader(at)) .WillOnce(testing::Return(makeBlockHeaderOfStateRoot(state_root))); EXPECT_CALL(*storage_, getEphemeralBatchAt(state_root)) .WillOnce(testing::Invoke([&keys](auto &root) { diff --git a/test/core/authority_discovery/CMakeLists.txt b/test/core/authority_discovery/CMakeLists.txt index 13d98cd749..ec28d63586 100644 --- a/test/core/authority_discovery/CMakeLists.txt +++ b/test/core/authority_discovery/CMakeLists.txt @@ -13,3 +13,13 @@ target_link_libraries(address_publisher_test logger_for_tests network ) + +addtest(audi_query_test + query.cpp + ) +target_link_libraries(audi_query_test + address_publisher + key_store + logger_for_tests + storage + ) diff --git a/test/core/authority_discovery/query.cpp b/test/core/authority_discovery/query.cpp new file mode 100644 index 0000000000..e21ff337b7 --- /dev/null +++ b/test/core/authority_discovery/query.cpp @@ -0,0 +1,240 @@ +/** + * Copyright Quadrivium LLC + * All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "authority_discovery/query/query_impl.hpp" + +#include +#include +#include +#include +#include +#include + +#include "authority_discovery/publisher/address_publisher.hpp" +#include "authority_discovery/query/audi_store_impl.hpp" +#include "mock/core/application/app_state_manager_mock.hpp" +#include 
"mock/core/blockchain/block_tree_mock.hpp" +#include "mock/core/crypto/ed25519_provider_mock.hpp" +#include "mock/core/crypto/key_store_mock.hpp" +#include "mock/core/crypto/sr25519_provider_mock.hpp" +#include "mock/core/network/protocols/parachain.hpp" +#include "mock/core/runtime/authority_discovery_api_mock.hpp" +#include "mock/libp2p/crypto/crypto_provider.hpp" +#include "mock/libp2p/protocol/kademlia/kademlia_mock.hpp" +#include "storage/in_memory/in_memory_spaced_storage.hpp" +#include "testutil/lazy.hpp" +#include "testutil/literals.hpp" +#include "testutil/prepare_loggers.hpp" + +using kagome::application::AppStateManagerMock; +using kagome::authority_discovery::audiEncode; +using kagome::authority_discovery::AudiStoreImpl; +using kagome::authority_discovery::QueryImpl; +using kagome::blockchain::BlockTreeMock; +using kagome::crypto::Ed25519ProviderMock; +using kagome::crypto::KeyStoreMock; +using kagome::crypto::Sr25519Keypair; +using kagome::crypto::Sr25519ProviderMock; +using kagome::network::ValidationProtocolReserve; +using kagome::network::ValidationProtocolReserveMock; +using kagome::runtime::AuthorityDiscoveryApiMock; +using kagome::storage::InMemorySpacedStorage; +using libp2p::HostMock; +using libp2p::Multiaddress; +using libp2p::PeerId; +using libp2p::basic::SchedulerMock; +using libp2p::crypto::CryptoProviderMock; +using libp2p::crypto::ProtobufKey; +using libp2p::crypto::marshaller::KeyMarshallerMock; +using libp2p::peer::AddressRepositoryMock; +using libp2p::peer::PeerInfo; +using libp2p::peer::PeerRepositoryMock; +using libp2p::protocol::kademlia::Kademlia; +using libp2p::protocol::kademlia::KademliaMock; +using testing::_; +using testing::Return; +using testing::ReturnRef; +using testutil::sptr_to_lazy; + +struct QueryTest : testing::Test { + static void SetUpTestCase() { + testutil::prepareLoggers(); + } + + void SetUp() override { + auto app_state_manager = std::make_shared(); + EXPECT_CALL(*app_state_manager, atLaunch(_)); + 
EXPECT_CALL(*block_tree_, bestBlock()); + EXPECT_CALL(*api_, authorities(_)) + .WillRepeatedly(Return(std::vector{audi_key_.public_key})); + EXPECT_CALL(*validation_protocol_, reserve(_, true)) + .Times(testing::AnyNumber()) + .WillRepeatedly([this](const PeerId &peer_id, bool) { + reserved_.emplace(peer_id); + }); + EXPECT_CALL(key_store_->sr25519(), getPublicKeys(_)) + .WillRepeatedly(Return(outcome::success())); + EXPECT_CALL(*sr25519_provider_, sign(_, _)) + .WillRepeatedly(Return(outcome::success())); + EXPECT_CALL(*sr25519_provider_, verify(_, _, _)).WillRepeatedly([this] { + return sig_ok_; + }); + EXPECT_CALL(*libp2p_crypto_provider_, verify(_, _, _)) + .WillRepeatedly([this] { return sig_ok_; }); + EXPECT_CALL(*ed25519_provider_, sign(_, _)) + .WillRepeatedly(Return(outcome::success())); + EXPECT_CALL(*key_marshaller_, unmarshalPublicKey(_)) + .WillRepeatedly(Return(outcome::success())); + EXPECT_CALL(*host_, getId()).WillRepeatedly(Return("b"_peerid)); + EXPECT_CALL(*host_, getPeerRepository()) + .WillRepeatedly(ReturnRef(peer_repo_)); + EXPECT_CALL(peer_repo_, getAddressRepository()) + .WillRepeatedly(ReturnRef(address_repo_)); + EXPECT_CALL(address_repo_, addAddresses(_, _, _)) + .WillRepeatedly(Return(outcome::success())); + EXPECT_CALL(*scheduler_, scheduleImpl(_, _, _)); + query_ = std::make_shared( + app_state_manager, + block_tree_, + api_, + sptr_to_lazy(validation_protocol_), + key_store_, + audi_store_, + sr25519_provider_, + libp2p_crypto_provider_, + key_marshaller_, + *host_, + sptr_to_lazy(kademlia_), + scheduler_); + query_->update().value(); + } + + auto info(size_t i) { + auto addr = Multiaddress::create( + fmt::format("/tcp/{}/p2p/{}", i, peer_id_.toBase58())) + .value(); + return PeerInfo{peer_id_, {addr}}; + } + + void receive(size_t i, std::optional time) { + auto raw = + audiEncode(ed25519_provider_, + sr25519_provider_, + {}, + key_pb_, + info(i), + audi_key_, + time ? 
std::make_optional(std::chrono::nanoseconds{*time}) + : std::nullopt) + .value(); + std::ignore = query_->validate(raw.first, raw.second); + } + + void expect(std::optional i) { + auto r = query_->get(audi_key_.public_key); + EXPECT_EQ(r, i ? std::make_optional(info(*i)) : std::nullopt); + } + + std::shared_ptr block_tree_ = + std::make_shared(); + std::shared_ptr api_ = + std::make_shared(); + std::shared_ptr validation_protocol_ = + std::make_shared(); + std::shared_ptr key_store_ = std::make_shared(); + std::shared_ptr audi_store_ = std::make_shared( + std::make_shared()); + std::shared_ptr sr25519_provider_ = + std::make_shared(); + std::shared_ptr libp2p_crypto_provider_ = + std::make_shared(); + std::shared_ptr key_marshaller_ = + std::make_shared(); + std::shared_ptr host_ = std::make_shared(); + std::shared_ptr kademlia_ = std::make_shared(); + std::shared_ptr scheduler_ = std::make_shared(); + std::shared_ptr query_; + Sr25519Keypair audi_key_; + ProtobufKey key_pb_{{0, 1}}; + PeerId peer_id_ = PeerId::fromPublicKey(key_pb_).value(); + std::shared_ptr ed25519_provider_ = + std::make_shared(); + AddressRepositoryMock address_repo_; + PeerRepositoryMock peer_repo_; + bool sig_ok_ = true; + std::set reserved_; +}; + +// https://github.com/paritytech/polkadot-sdk/blob/da2dd9b7737cb7c0dc9dc3dc74b384c719ea3306/polkadot/node/network/gossip-support/src/tests.rs#L812 +/** + * @given record about peer + * @when receive record + * @then connect to peer + */ +TEST_F(QueryTest, test_quickly_connect_to_authorities_that_changed_address) { + receive(1, std::nullopt); + EXPECT_TRUE(reserved_.contains(peer_id_)); +} + +// https://github.com/paritytech/polkadot-sdk/blob/da2dd9b7737cb7c0dc9dc3dc74b384c719ea3306/substrate/client/authority-discovery/src/worker/tests.rs#L707 +/** + * @given record without timestamp + * @when receive record + * @then record is inserted + */ +TEST_F(QueryTest, strict_accept_address_without_creation_time) { + expect(std::nullopt); + receive(1, 
std::nullopt); + expect(1); +} + +// https://github.com/paritytech/polkadot-sdk/blob/da2dd9b7737cb7c0dc9dc3dc74b384c719ea3306/substrate/client/authority-discovery/src/worker/tests.rs#L728 +/** + * @given old record + * @when receive new record + * @then new record overwrites old record + */ +TEST_F(QueryTest, keep_last_received_if_no_creation_time) { + receive(1, std::nullopt); + receive(2, std::nullopt); + expect(2); +} + +// https://github.com/paritytech/polkadot-sdk/blob/da2dd9b7737cb7c0dc9dc3dc74b384c719ea3306/substrate/client/authority-discovery/src/worker/tests.rs#L775 +/** + * @given record with invalid signature + * @when receive record + * @then record ignored + */ +TEST_F(QueryTest, records_with_incorrectly_signed_creation_time_are_ignored) { + sig_ok_ = false; + receive(1, std::nullopt); + expect(std::nullopt); +} + +// https://github.com/paritytech/polkadot-sdk/blob/da2dd9b7737cb7c0dc9dc3dc74b384c719ea3306/substrate/client/authority-discovery/src/worker/tests.rs#L829 +/** + * @given old record + * @when receive new record + * @then new record overwrites old record + */ +TEST_F(QueryTest, newer_records_overwrite_older_ones) { + receive(1, 1); + receive(2, 2); + expect(2); +} + +// https://github.com/paritytech/polkadot-sdk/blob/da2dd9b7737cb7c0dc9dc3dc74b384c719ea3306/substrate/client/authority-discovery/src/worker/tests.rs#L880 +/** + * @given new record + * @when receive old record + * @then old record is ignored + */ +TEST_F(QueryTest, older_records_dont_affect_newer_ones) { + receive(2, 2); + receive(1, 1); + expect(2); +} diff --git a/test/core/blockchain/CMakeLists.txt b/test/core/blockchain/CMakeLists.txt index a35257b67a..c28190d6b6 100644 --- a/test/core/blockchain/CMakeLists.txt +++ b/test/core/blockchain/CMakeLists.txt @@ -4,15 +4,6 @@ # SPDX-License-Identifier: Apache-2.0 # -addtest(block_header_repository_test - block_header_repository_test.cpp - ) -target_link_libraries(block_header_repository_test - blockchain - base_rocksdb_test - hasher 
- ) - addtest(block_tree_test block_tree_test.cpp ) diff --git a/test/core/blockchain/block_header_repository_test.cpp b/test/core/blockchain/block_header_repository_test.cpp deleted file mode 100644 index 1eee6d2ca6..0000000000 --- a/test/core/blockchain/block_header_repository_test.cpp +++ /dev/null @@ -1,152 +0,0 @@ -/** - * Copyright Quadrivium LLC - * All Rights Reserved - * SPDX-License-Identifier: Apache-2.0 - */ - -#include "blockchain/block_header_repository.hpp" - -#include -#include - -#include - -#include "blockchain/impl/block_header_repository_impl.hpp" -#include "blockchain/impl/storage_util.hpp" -#include "crypto/hasher/hasher_impl.hpp" -#include "scale/scale.hpp" -#include "testutil/literals.hpp" -#include "testutil/outcome.hpp" -#include "testutil/prepare_loggers.hpp" -#include "testutil/storage/base_rocksdb_test.hpp" - -using kagome::blockchain::BlockHeaderRepository; -using kagome::blockchain::BlockHeaderRepositoryImpl; -using kagome::blockchain::putToSpace; -using kagome::common::Buffer; -using kagome::common::Hash256; -using kagome::primitives::BlockHeader; -using kagome::primitives::BlockInfo; -using kagome::primitives::BlockNumber; -using kagome::storage::Space; - -class BlockHeaderRepository_Test : public test::BaseRocksDB_Test { - public: - static void SetUpTestCase() { - testutil::prepareLoggers(); - } - - BlockHeaderRepository_Test() - : BaseRocksDB_Test(fs::path("/tmp/blockheaderrepotest.rcksdb")) {} - - void SetUp() override { - open(); - - hasher_ = std::make_shared(); - header_repo_ = std::make_shared(rocks_, hasher_); - } - - outcome::result storeHeader(BlockNumber num, BlockHeader h) { - BlockHeader header = std::move(h); - header.number = num; - OUTCOME_TRY(enc_header, scale::encode(header)); - auto hash = hasher_->blake2b_256(enc_header); - OUTCOME_TRY(putToSpace(*rocks_, Space::kHeader, hash, Buffer{enc_header})); - - auto num_to_hash_key = kagome::blockchain::blockNumberToKey(num); - auto key_space = 
rocks_->getSpace(Space::kLookupKey); - OUTCOME_TRY(key_space->put(num_to_hash_key, hash)); - - return hash; - } - - BlockHeader getDefaultHeader() { - BlockHeader h{}; - h.number = 42; - h.extrinsics_root = "DEADBEEF"_hash256; - h.parent_hash = "ABCDEF"_hash256; - h.state_root = "010203"_hash256; - return h; - } - - std::shared_ptr hasher_; - std::shared_ptr header_repo_; -}; - -class BlockHeaderRepository_NumberParametrized_Test - : public BlockHeaderRepository_Test, - public testing::WithParamInterface {}; - -const std::vector ParamValues = {1, 42, 12345, 0, 0xFFFFFFFF}; - -/** - * @given HeaderBackend instance with several headers in the storage - * @when accessing a header that wasn't put into storage - * @then result is error - */ -TEST_F(BlockHeaderRepository_Test, UnexistingHeader) { - auto chosen_number = ParamValues[0]; - for (auto &c : ParamValues) { - if (c != chosen_number) { - EXPECT_OUTCOME_TRUE_1(storeHeader(c, getDefaultHeader())) - } - } - BlockHeader not_in_storage = getDefaultHeader(); - not_in_storage.number = chosen_number; - EXPECT_OUTCOME_TRUE(enc_header, scale::encode(not_in_storage)) - auto hash = hasher_->blake2b_256(enc_header); - EXPECT_OUTCOME_FALSE_1(header_repo_->getBlockHeader(hash)) - EXPECT_OUTCOME_FALSE_1(header_repo_->getHashById(chosen_number)) - EXPECT_OUTCOME_FALSE_1(header_repo_->getNumberById(hash)) - - // doesn't require access to storage, as it basically returns its argument - EXPECT_OUTCOME_TRUE_1(header_repo_->getHashById(hash)) - EXPECT_OUTCOME_TRUE_1(header_repo_->getNumberById(chosen_number)) -} - -/** - * @given HeaderBackend instance - * @when learning block hash by its number through HeaderBackend - * @then resulting hash is equal to the original hash of the block for both - * retrieval through getHashByNumber and getHashById - */ -TEST_P(BlockHeaderRepository_NumberParametrized_Test, GetHashByNumber) { - EXPECT_OUTCOME_TRUE(hash, storeHeader(GetParam(), getDefaultHeader())) - EXPECT_OUTCOME_TRUE(maybe_hash, 
header_repo_->getHashByNumber(GetParam())) - ASSERT_THAT(hash, testing::ContainerEq(maybe_hash)); - EXPECT_OUTCOME_TRUE(maybe_another_hash, header_repo_->getHashById(GetParam())) - ASSERT_THAT(hash, testing::ContainerEq(maybe_another_hash)); -} - -/** - * @given HeaderBackend instance - * @when learning block number by its hash through HeaderBackend - * @then resulting number is equal to the original block number for both - * retrieval through getNumberByHash and getNumberById - */ -TEST_P(BlockHeaderRepository_NumberParametrized_Test, GetNumberByHash) { - EXPECT_OUTCOME_TRUE(hash, storeHeader(GetParam(), getDefaultHeader())) - EXPECT_OUTCOME_TRUE(maybe_number, header_repo_->getNumberByHash(hash)) - ASSERT_EQ(GetParam(), maybe_number); - EXPECT_OUTCOME_TRUE(maybe_another_number, - header_repo_->getNumberById(GetParam())) - ASSERT_EQ(GetParam(), maybe_another_number); -} - -/** - * @given HeaderBackend instance - * @when retrieving a block header by its id - * @then the same header that was put into the storage is returned, regardless - * of whether the id contained a number or a hash - */ -TEST_P(BlockHeaderRepository_NumberParametrized_Test, GetHeader) { - EXPECT_OUTCOME_TRUE(hash, storeHeader(GetParam(), getDefaultHeader())) - EXPECT_OUTCOME_TRUE(header_by_hash, header_repo_->getBlockHeader(hash)) - auto header_should_be = getDefaultHeader(); - header_should_be.number = GetParam(); - ASSERT_EQ(header_by_hash, header_should_be); -} - -INSTANTIATE_TEST_SUITE_P(Numbers, - BlockHeaderRepository_NumberParametrized_Test, - testing::ValuesIn(ParamValues)); diff --git a/test/core/blockchain/block_storage_test.cpp b/test/core/blockchain/block_storage_test.cpp index ec6dd94c9a..87ad0ae0ce 100644 --- a/test/core/blockchain/block_storage_test.cpp +++ b/test/core/blockchain/block_storage_test.cpp @@ -173,6 +173,36 @@ TEST_F(BlockStorageTest, PutBlock) { ASSERT_OUTCOME_SUCCESS_TRY(block_storage->putBlock(block)); } +/* + * @given a block storage and a block that is not in 
storage yet + * @when trying to get a block from the storage + * @then an error is returned + */ +TEST_F(BlockStorageTest, GetBlockNotFound) { + ASSERT_OUTCOME_SUCCESS( + block_storage, + BlockStorageImpl::create(root_hash, spaced_storage, hasher)); + + EXPECT_OUTCOME_ERROR(get_res, + block_storage->getBlockHeader(genesis_block_hash), + BlockStorageError::HEADER_NOT_FOUND); +} + +/* + * @given a block storage and a block that is not in storage yet + * @when trying to get a block from the storage + * @then success value containing nullopt is returned + */ +TEST_F(BlockStorageTest, TryGetBlockNotFound) { + ASSERT_OUTCOME_SUCCESS( + block_storage, + BlockStorageImpl::create(root_hash, spaced_storage, hasher)); + + ASSERT_OUTCOME_SUCCESS(try_get_res, + block_storage->tryGetBlockHeader(genesis_block_hash)); + ASSERT_FALSE(try_get_res.has_value()); +} + /** * @given a block storage and a block that is not in storage yet * @when putting a block in the storage and underlying storage throws an diff --git a/test/core/blockchain/block_tree_test.cpp b/test/core/blockchain/block_tree_test.cpp index e58cc1c30c..befb57539f 100644 --- a/test/core/blockchain/block_tree_test.cpp +++ b/test/core/blockchain/block_tree_test.cpp @@ -3,7 +3,7 @@ * All Rights Reserved * SPDX-License-Identifier: Apache-2.0 */ - +#include #include #include "blockchain/impl/block_tree_impl.hpp" @@ -15,7 +15,6 @@ #include "crypto/hasher/hasher_impl.hpp" #include "mock/core/application/app_configuration_mock.hpp" #include "mock/core/application/app_state_manager_mock.hpp" -#include "mock/core/blockchain/block_header_repository_mock.hpp" #include "mock/core/blockchain/block_storage_mock.hpp" #include "mock/core/blockchain/justification_storage_policy.hpp" #include "mock/core/consensus/babe/babe_config_repository_mock.hpp" @@ -30,7 +29,6 @@ using namespace kagome; using application::AppConfigurationMock; using application::AppStateManagerMock; -using blockchain::BlockHeaderRepositoryMock; using 
blockchain::BlockStorageMock; using blockchain::BlockTreeError; using blockchain::BlockTreeImpl; @@ -114,12 +112,10 @@ struct BlockTreeTest : public testing::Test { return outcome::success(); })); - EXPECT_CALL(*header_repo_, getNumberByHash(kFinalizedBlockInfo.hash)) - .WillRepeatedly(Return(kFinalizedBlockInfo.number)); - - EXPECT_CALL(*header_repo_, getHashByNumber(_)) - .WillRepeatedly( - Invoke([&](const BlockNumber &n) -> outcome::result { + EXPECT_CALL(*storage_, getBlockHash(testing::Matcher(_))) + .WillRepeatedly(Invoke( + [&](BlockNumber n) + -> outcome::result> { auto it = num_to_hash_.find(n); if (it == num_to_hash_.end()) { return BlockTreeError::HEADER_NOT_FOUND; @@ -127,12 +123,10 @@ struct BlockTreeTest : public testing::Test { return it->second; })); - EXPECT_CALL(*header_repo_, + EXPECT_CALL(*storage_, getBlockHeader({finalized_block_header_.parent_hash})) .WillRepeatedly(Return(BlockTreeError::HEADER_NOT_FOUND)); - EXPECT_CALL(*header_repo_, getBlockHeader(kFinalizedBlockInfo.hash)) - .WillRepeatedly(Return(finalized_block_header_)); EXPECT_CALL(*storage_, getBlockHeader(kFinalizedBlockInfo.hash)) .WillRepeatedly(Return(finalized_block_header_)); @@ -170,9 +164,7 @@ struct BlockTreeTest : public testing::Test { std::make_shared(); block_tree_ = BlockTreeImpl::create(*app_config_, - header_repo_, storage_, - extrinsic_observer_, hasher_, chain_events_engine, ext_events_engine, @@ -237,8 +229,6 @@ struct BlockTreeTest : public testing::Test { // hash for header repo and number for block storage just because tests // currently require so - EXPECT_CALL(*header_repo_, getBlockHeader(hash)) - .WillRepeatedly(Return(header)); EXPECT_CALL(*storage_, getBlockHeader(hash)).WillRepeatedly(Return(header)); return {hash, header}; @@ -270,9 +260,6 @@ struct BlockTreeTest : public testing::Test { std::shared_ptr app_config_ = std::make_shared(); - std::shared_ptr header_repo_ = - std::make_shared(); - std::shared_ptr storage_ = std::make_shared(); @@ -703,10 
+690,7 @@ TEST_F(BlockTreeTest, GetChainByBlockDescending) { new_block = Block{header, body}; auto hash2 = addBlock(new_block); - EXPECT_CALL(*header_repo_, getNumberByHash(kFinalizedBlockInfo.hash)) - .WillRepeatedly(Return(0)); - EXPECT_CALL(*header_repo_, getNumberByHash(hash2)).WillRepeatedly(Return(2)); - EXPECT_CALL(*header_repo_, getBlockHeader({kFinalizedBlockInfo.hash})) + EXPECT_CALL(*storage_, getBlockHeader({kFinalizedBlockInfo.hash})) .WillOnce(Return(BlockTreeError::HEADER_NOT_FOUND)); std::vector expected_chain{hash2, hash1}; @@ -719,21 +703,6 @@ TEST_F(BlockTreeTest, GetChainByBlockDescending) { ASSERT_EQ(chain, expected_chain); } -/** - * @given a block tree with one block in it - * @when trying to obtain the best chain that contais a block, which is - * present in the storage, but is not connected to the base block in the tree - * @then BLOCK_NOT_FOUND error is returned - */ -TEST_F(BlockTreeTest, GetBestChain_BlockNotFound) { - BlockInfo target(1337, "TargetBlock#1337"_hash256); - EXPECT_CALL(*header_repo_, getNumberByHash(target.hash)) - .WillRepeatedly(Return(BlockTreeError::EXISTING_BLOCK_NOT_FOUND)); - - ASSERT_OUTCOME_ERROR(block_tree_->getBestContaining(target.hash), - BlockTreeError::EXISTING_BLOCK_NOT_FOUND); -} - /** * @given a block tree with one block in it * @when trying to obtain the best chain that contais a block, which is @@ -743,9 +712,7 @@ TEST_F(BlockTreeTest, GetBestChain_BlockNotFound) { TEST_F(BlockTreeTest, GetBestChain_DiscardedBlock) { BlockInfo target = kFirstBlockInfo; BlockInfo other(kFirstBlockInfo.number, "OtherBlock#1"_hash256); - EXPECT_CALL(*header_repo_, getNumberByHash(target.hash)) - .WillRepeatedly(Return(target.number)); - EXPECT_CALL(*header_repo_, getHashByNumber(target.number)) + EXPECT_CALL(*storage_, getBlockHash(target.number)) .WillRepeatedly(Return(other.hash)); ASSERT_OUTCOME_ERROR(block_tree_->getBestContaining(target.hash), diff --git a/test/core/consensus/CMakeLists.txt 
b/test/core/consensus/CMakeLists.txt index d5f82b53d9..f71417bcd5 100644 --- a/test/core/consensus/CMakeLists.txt +++ b/test/core/consensus/CMakeLists.txt @@ -14,5 +14,4 @@ addtest(beefy_test target_link_libraries(beefy_test logger_for_tests network - in_memory_storage ) diff --git a/test/core/consensus/beefy_test.cpp b/test/core/consensus/beefy_test.cpp index a15824ce0d..7b8498c281 100644 --- a/test/core/consensus/beefy_test.cpp +++ b/test/core/consensus/beefy_test.cpp @@ -26,9 +26,9 @@ #include "mock/core/runtime/beefy_api.hpp" #include "network/impl/protocols/beefy_protocol_impl.hpp" #include "primitives/event_types.hpp" +#include "storage/in_memory/in_memory_spaced_storage.hpp" #include "testutil/lazy.hpp" #include "testutil/prepare_loggers.hpp" -#include "testutil/storage/in_memory/in_memory_spaced_storage.hpp" using kagome::beefyMmrDigest; using kagome::TestThreadPool; diff --git a/test/core/consensus/grandpa/CMakeLists.txt b/test/core/consensus/grandpa/CMakeLists.txt index 521c9cbaaa..5fbb7489dd 100644 --- a/test/core/consensus/grandpa/CMakeLists.txt +++ b/test/core/consensus/grandpa/CMakeLists.txt @@ -45,5 +45,4 @@ target_link_libraries(authority_manager_test dummy_error logger_for_tests storage - in_memory_storage ) diff --git a/test/core/consensus/grandpa/authority_manager_test.cpp b/test/core/consensus/grandpa/authority_manager_test.cpp index 975d68bf10..00337d2f62 100644 --- a/test/core/consensus/grandpa/authority_manager_test.cpp +++ b/test/core/consensus/grandpa/authority_manager_test.cpp @@ -12,7 +12,7 @@ #include "mock/core/application/app_state_manager_mock.hpp" #include "mock/core/blockchain/block_tree_mock.hpp" #include "mock/core/runtime/grandpa_api_mock.hpp" -#include "testutil/storage/in_memory/in_memory_spaced_storage.hpp" +#include "storage/in_memory/in_memory_spaced_storage.hpp" #include "testutil/prepare_loggers.hpp" diff --git a/test/core/consensus/grandpa/chain_test.cpp b/test/core/consensus/grandpa/chain_test.cpp index 
076fdd1481..b96ba2ccb3 100644 --- a/test/core/consensus/grandpa/chain_test.cpp +++ b/test/core/consensus/grandpa/chain_test.cpp @@ -12,7 +12,6 @@ #include "consensus/grandpa/impl/environment_impl.hpp" #include "consensus/grandpa/justification_observer.hpp" #include "mock/core/application/app_state_manager_mock.hpp" -#include "mock/core/blockchain/block_header_repository_mock.hpp" #include "mock/core/blockchain/block_tree_mock.hpp" #include "mock/core/consensus/grandpa/authority_manager_mock.hpp" #include "mock/core/consensus/grandpa/grandpa_mock.hpp" @@ -32,8 +31,6 @@ using kagome::Watchdog; using kagome::application::StartApp; -using kagome::blockchain::BlockHeaderRepository; -using kagome::blockchain::BlockHeaderRepositoryMock; using kagome::blockchain::BlockTree; using kagome::blockchain::BlockTreeMock; using kagome::common::Blob; @@ -80,7 +77,6 @@ class ChainTest : public testing::Test { chain = std::make_shared( app_state_manager, tree, - header_repo, authority_manager, grandpa_transmitter, approved_ancestor, @@ -119,7 +115,7 @@ class ChainTest : public testing::Test { BlockHeader hh; hh.number = number; hh.parent_hash = parent; - EXPECT_CALL(*header_repo, getBlockHeader(hash)) + EXPECT_CALL(*tree, getBlockHeader(hash)) .WillRepeatedly(Return(hh)); }; @@ -138,8 +134,6 @@ class ChainTest : public testing::Test { } std::shared_ptr tree = std::make_shared(); - std::shared_ptr header_repo = - std::make_shared(); std::shared_ptr authority_manager = std::make_shared(); std::shared_ptr grandpa_transmitter = diff --git a/test/core/consensus/timeline/CMakeLists.txt b/test/core/consensus/timeline/CMakeLists.txt index 7de7fd975b..11e8e94da1 100644 --- a/test/core/consensus/timeline/CMakeLists.txt +++ b/test/core/consensus/timeline/CMakeLists.txt @@ -22,7 +22,6 @@ target_link_libraries(slots_util_test clock storage logger_for_tests - in_memory_storage ) addtest(timeline_test diff --git a/test/core/consensus/timeline/slots_util_test.cpp 
b/test/core/consensus/timeline/slots_util_test.cpp index 3b558464af..05c4046ce0 100644 --- a/test/core/consensus/timeline/slots_util_test.cpp +++ b/test/core/consensus/timeline/slots_util_test.cpp @@ -15,8 +15,8 @@ #include "mock/core/runtime/babe_api_mock.hpp" #include "mock/core/storage/spaced_storage_mock.hpp" #include "mock/core/storage/trie/trie_storage_mock.hpp" +#include "storage/in_memory/in_memory_storage.hpp" #include "testutil/prepare_loggers.hpp" -#include "testutil/storage/in_memory/in_memory_storage.hpp" using namespace kagome; using application::AppStateManagerMock; diff --git a/test/core/network/CMakeLists.txt b/test/core/network/CMakeLists.txt index 984754dcba..25924cfb60 100644 --- a/test/core/network/CMakeLists.txt +++ b/test/core/network/CMakeLists.txt @@ -36,7 +36,6 @@ target_link_libraries(state_protocol_observer_test logger_for_tests storage network - in_memory_storage ) addtest(sync_protocol_observer_test diff --git a/test/core/network/rpc_libp2p_test.cpp b/test/core/network/rpc_libp2p_test.cpp index 09d4f5fe8d..d4ee8977b1 100644 --- a/test/core/network/rpc_libp2p_test.cpp +++ b/test/core/network/rpc_libp2p_test.cpp @@ -137,7 +137,7 @@ TEST_F(RpcLibp2pTest, ReadWithoutResponse) { */ TEST_F(RpcLibp2pTest, WriteWithResponse) { EXPECT_CALL(host_, - newStream(peer_info_.id, libp2p::StreamProtocols{protocol_}, _)) + newStream(peer_info_, libp2p::StreamProtocols{protocol_}, _)) .WillOnce( testing::InvokeArgument<2>(StreamAndProtocol{stream_, protocol_})); @@ -165,7 +165,7 @@ TEST_F(RpcLibp2pTest, WriteWithResponse) { */ TEST_F(RpcLibp2pTest, WriteWithResponseErroredResponse) { EXPECT_CALL(host_, - newStream(peer_info_.id, libp2p::StreamProtocols{protocol_}, _)) + newStream(peer_info_, libp2p::StreamProtocols{protocol_}, _)) .WillOnce( testing::InvokeArgument<2>(StreamAndProtocol{stream_, protocol_})); @@ -195,7 +195,7 @@ TEST_F(RpcLibp2pTest, WriteWithResponseErroredResponse) { */ TEST_F(RpcLibp2pTest, WriteWithoutResponse) { EXPECT_CALL(host_, - 
newStream(peer_info_.id, libp2p::StreamProtocols{protocol_}, _)) + newStream(peer_info_, libp2p::StreamProtocols{protocol_}, _)) .WillOnce( testing::InvokeArgument<2>(StreamAndProtocol{stream_, protocol_})); diff --git a/test/core/network/state_protocol_observer_test.cpp b/test/core/network/state_protocol_observer_test.cpp index 1087207fd5..7c2693fdfb 100644 --- a/test/core/network/state_protocol_observer_test.cpp +++ b/test/core/network/state_protocol_observer_test.cpp @@ -12,6 +12,7 @@ #include "mock/core/blockchain/block_header_repository_mock.hpp" #include "mock/core/storage/trie_pruner/trie_pruner_mock.hpp" #include "network/types/state_request.hpp" +#include "storage/in_memory/in_memory_spaced_storage.hpp" #include "storage/trie/impl/trie_storage_backend_impl.hpp" #include "storage/trie/impl/trie_storage_impl.hpp" #include "storage/trie/polkadot_trie/polkadot_trie_factory_impl.hpp" @@ -22,7 +23,6 @@ #include "testutil/literals.hpp" #include "testutil/outcome.hpp" #include "testutil/prepare_loggers.hpp" -#include "testutil/storage/in_memory/in_memory_spaced_storage.hpp" using namespace kagome; diff --git a/test/core/parachain/CMakeLists.txt b/test/core/parachain/CMakeLists.txt index c6d2b59f20..a2f352b457 100644 --- a/test/core/parachain/CMakeLists.txt +++ b/test/core/parachain/CMakeLists.txt @@ -30,6 +30,19 @@ target_link_libraries(scope_test logger ) +addtest(candidates_test + candidates.cpp + ) + +target_link_libraries(candidates_test + prospective_parachains + validator_parachain + log_configurator + base_fs_test + key_store + logger + ) + addtest(fragment_chain_test fragment_chain.cpp ) diff --git a/test/core/parachain/availability/recovery_test.cpp b/test/core/parachain/availability/recovery_test.cpp index 5c460a3fd9..09db933a95 100644 --- a/test/core/parachain/availability/recovery_test.cpp +++ b/test/core/parachain/availability/recovery_test.cpp @@ -13,6 +13,7 @@ #include "mock/core/authority_discovery/query_mock.hpp" #include 
"mock/core/blockchain/block_tree_mock.hpp" #include "mock/core/crypto/hasher_mock.hpp" +#include "mock/core/crypto/session_keys_mock.hpp" #include "mock/core/network/peer_manager_mock.hpp" #include "mock/core/network/router_mock.hpp" #include "mock/core/parachain/availability_store_mock.hpp" @@ -30,6 +31,7 @@ using kagome::blockchain::BlockTreeMock; using kagome::common::Buffer; using kagome::crypto::BoostRandomGenerator; using kagome::crypto::HasherMock; +using kagome::crypto::SessionKeysMock; using kagome::network::CandidateHash; using kagome::network::CandidateReceipt; using kagome::network::Chunk; @@ -52,12 +54,14 @@ using kagome::runtime::ParachainHostMock; using kagome::runtime::SessionInfo; using kagome::parachain::makeTrieProof; +using kagome::parachain::minChunks; using kagome::parachain::toChunks; using libp2p::PeerId; using libp2p::peer::PeerInfo; using testing::_; +using testing::AnyNumber; using testing::Args; using testing::Invoke; using testing::Return; @@ -71,14 +75,23 @@ class RecoveryTest : public testing::Test { testutil::prepareLoggers(soralog::Level::TRACE); } - void SetUp() override { - size_t data_size = 8; - original_data.resize(data_size); - random_generator.fillRandomly(original_data); + void prepareAvailableData(size_t data_size) { + original_available_data.pov.payload.resize(data_size); + random_generator.fillRandomly(original_available_data.pov.payload); original_chunks = toChunks(n_validators, original_available_data).value(); receipt.descriptor.erasure_encoding_root = makeTrieProof(original_chunks); + required_chunk_number = minChunks(n_validators).value(); + tolerance_chunk_number = original_chunks.size() - required_chunk_number; + + auto candidate_hash = Recovery::HashedCandidateReceipt{receipt}.getHash(); + + EXPECT_CALL(*av_store, getChunk(candidate_hash, 1)) + .WillRepeatedly(Return(original_chunks[1])); + } + + void SetUp() override { session = SessionInfo{}; chain_spec = std::make_shared(); @@ -102,10 +115,13 @@ class 
RecoveryTest : public testing::Test { })); av_store = std::make_shared(); + query_audi = std::make_shared(); router = std::make_shared(); router->setReturningMockedProtocols(); + EXPECT_CALL(*router, getFetchAvailableDataProtocol).Times(AnyNumber()); + EXPECT_CALL(*router, getFetchChunkProtocol).Times(AnyNumber()); auto fetch_available_data = router->getMockedFetchAvailableDataProtocol(); ON_CALL(*fetch_available_data, doRequest(_, _, _)) @@ -129,6 +145,11 @@ class RecoveryTest : public testing::Test { peer_manager = std::make_shared(); + session_keys = std::make_shared(); + ON_CALL(*session_keys, getParaKeyPair(_)) + .WillByDefault(Return( + std::pair(std::shared_ptr{}, 1))); + recovery = std::make_shared(chain_spec, hasher, block_tree, @@ -136,7 +157,8 @@ class RecoveryTest : public testing::Test { av_store, query_audi, router, - peer_manager); + peer_manager, + session_keys); auto &val_group_0 = session.validator_groups.emplace_back(); for (size_t i = 0; i < n_validators; ++i) { @@ -167,10 +189,15 @@ class RecoveryTest : public testing::Test { std::optional>)>>(); } + void TearDown() override { + original_chunks.clear(); + } + BoostRandomGenerator random_generator; - size_t n_validators = 6; - Buffer original_data; + size_t n_validators = 10; + size_t required_chunk_number; + size_t tolerance_chunk_number; AvailableData original_available_data{}; std::vector original_chunks; @@ -209,6 +236,7 @@ class RecoveryTest : public testing::Test { std::shared_ptr query_audi; std::shared_ptr router; std::shared_ptr peer_manager; + std::shared_ptr session_keys; std::shared_ptr>)>> @@ -218,6 +246,8 @@ class RecoveryTest : public testing::Test { }; TEST_F(RecoveryTest, FullFromBakers_NoGroup) { + prepareAvailableData(2048); + std::optional backing_group = std::nullopt; std::optional core = std::nullopt; // value doesn't matter @@ -229,6 +259,8 @@ TEST_F(RecoveryTest, FullFromBakers_NoGroup) { } TEST_F(RecoveryTest, FullFromBakers_Success) { + prepareAvailableData(2048); + 
std::optional backing_group = 0; // any non nullopt value std::optional core = std::nullopt; // value doesn't matter @@ -257,6 +289,8 @@ TEST_F(RecoveryTest, FullFromBakers_Success) { } TEST_F(RecoveryTest, SystematicChunks_NoCore) { + prepareAvailableData(2048); + peer_state.req_chunk_version = kagome::network::ReqChunkVersion::V2; std::optional backing_group{}; // nullopt to skip full recovery std::optional core = std::nullopt; @@ -296,6 +330,8 @@ TEST_F(RecoveryTest, SystematicChunks_NoCore) { } TEST_F(RecoveryTest, SystematicChunks_Success) { + prepareAvailableData(2048); + peer_state.req_chunk_version = kagome::network::ReqChunkVersion::V2; std::optional backing_group{}; // nullopt to skip full recovery std::optional core = 0; @@ -335,6 +371,8 @@ TEST_F(RecoveryTest, SystematicChunks_Success) { } TEST_F(RecoveryTest, RegularChunks_Success) { + prepareAvailableData(2048); + peer_state.req_chunk_version = kagome::network::ReqChunkVersion::V2; std::optional backing_group{}; // nullopt to skip full recovery std::optional core = 0; @@ -377,7 +415,9 @@ TEST_F(RecoveryTest, RegularChunks_Success) { ASSERT_EQ(available_data, original_available_data); } -TEST_F(RecoveryTest, Failure) { +TEST_F(RecoveryTest, CorruptedChunk) { + prepareAvailableData(2048); + peer_state.req_chunk_version = kagome::network::ReqChunkVersion::V2; std::optional backing_group = 0; std::optional core = 0; @@ -400,11 +440,59 @@ TEST_F(RecoveryTest, Failure) { fetch_available_data_requests.pop(); } - // Trying to do chunks recovery, but not enough number chunks + // Trying to do chunks recovery, but chunks + // tolerated number are received corrupted while (not fetch_chunk_requests.empty()) { auto &[peer_id, req, cb] = fetch_chunk_requests.front(); const auto &ec_chunk = original_chunks[req.chunk_index]; - if (ec_chunk.index == 0) { + Chunk chunk{ + .data = ec_chunk.chunk, + .chunk_index = ec_chunk.index, + .proof = ec_chunk.proof, + }; + if (chunk.chunk_index < tolerance_chunk_number) { + 
chunk.data = {"corrupted chunk"_bytes}; + } + cb(chunk); + fetch_chunk_requests.pop(); + } + + testing::Mock::VerifyAndClear(callback.get()); + + ASSERT_TRUE(available_data_res_opt.has_value()); +} + +TEST_F(RecoveryTest, InsufficientChunks) { + prepareAvailableData(2048); + + peer_state.req_chunk_version = kagome::network::ReqChunkVersion::V2; + std::optional backing_group = 0; + std::optional core = 0; + + std::optional> available_data_res_opt; + + EXPECT_CALL(*callback, Call(_)) + .WillOnce(WithArgs<0>( + [&](std::optional> x) mutable { + available_data_res_opt = std::move(x); + })); + + recovery->recover( + receipt, session_index, backing_group, core, callback->AsStdFunction()); + + // Trying to recover full from bakers, but all return no-data + while (not fetch_available_data_requests.empty()) { + auto &[peer_id, req, cb] = fetch_available_data_requests.front(); + cb(kagome::network::Empty{}); + fetch_available_data_requests.pop(); + } + + // Trying to do chunks recovery, but chunks less than required number are + // received + while (not fetch_chunk_requests.empty()) { + auto &[peer_id, req, cb] = fetch_chunk_requests.front(); + const auto &ec_chunk = original_chunks[req.chunk_index]; + if (ec_chunk.index < required_chunk_number - 1) { Chunk chunk{ .data = ec_chunk.chunk, .chunk_index = ec_chunk.index, @@ -421,3 +509,216 @@ TEST_F(RecoveryTest, Failure) { ASSERT_FALSE(available_data_res_opt.has_value()); } + +TEST_F(RecoveryTest, DelayedChunks) { + prepareAvailableData(2048); + + peer_state.req_chunk_version = kagome::network::ReqChunkVersion::V2; + std::optional backing_group = 0; + std::optional core = 0; + + std::optional> available_data_res_opt; + + recovery->recover( + receipt, session_index, backing_group, core, callback->AsStdFunction()); + + // Trying to recover full from bakers, but all return no-data + while (not fetch_available_data_requests.empty()) { + auto &[peer_id, req, cb] = fetch_available_data_requests.front(); + 
cb(kagome::network::Empty{}); + fetch_available_data_requests.pop(); + } + + size_t handled_counter = 0; + + EXPECT_CALL(*callback, Call(_)).Times(0); + + // Trying to do chunks recovery, but handled chunks + // less than required number to emulate of delay + while (not fetch_chunk_requests.empty()) { + auto &[peer_id, req, cb] = fetch_chunk_requests.front(); + const auto &ec_chunk = original_chunks[req.chunk_index]; + Chunk chunk{ + .data = ec_chunk.chunk, + .chunk_index = ec_chunk.index, + .proof = ec_chunk.proof, + }; + cb(chunk); + fetch_chunk_requests.pop(); + + // Stop responding for required-1 to emulate of delay) + if (++handled_counter == required_chunk_number - 1) { + break; + } + } + + // Actually stopped for emulation of delay + ASSERT_EQ(handled_counter, required_chunk_number - 1); + + // Some requests still active (delayed) + ASSERT_FALSE(fetch_chunk_requests.empty()); + + // Available data still is not reconstructed + ASSERT_FALSE(available_data_res_opt.has_value()); + + testing::Mock::VerifyAndClear(callback.get()); + + EXPECT_CALL(*callback, Call(_)) + .WillOnce(WithArgs<0>( + [&](std::optional> x) mutable { + available_data_res_opt = std::move(x); + })); + + // After continue data should be completely reconstructed + while (not fetch_chunk_requests.empty()) { + auto &[peer_id, req, cb] = fetch_chunk_requests.front(); + const auto &ec_chunk = original_chunks[req.chunk_index]; + Chunk chunk{ + .data = ec_chunk.chunk, + .chunk_index = ec_chunk.index, + .proof = ec_chunk.proof, + }; + cb(chunk); + fetch_chunk_requests.pop(); + } + + testing::Mock::VerifyAndClear(callback.get()); + + ASSERT_TRUE(available_data_res_opt.has_value()); +} + +TEST_F(RecoveryTest, DuplicateChunk) { + prepareAvailableData(2048); + + peer_state.req_chunk_version = kagome::network::ReqChunkVersion::V2; + std::optional backing_group = 0; + std::optional core = 0; + + std::optional> available_data_res_opt; + + EXPECT_CALL(*callback, Call(_)) + .WillOnce(WithArgs<0>( + 
[&](std::optional> x) mutable { + available_data_res_opt = std::move(x); + })); + + recovery->recover( + receipt, session_index, backing_group, core, callback->AsStdFunction()); + + // Trying to recover full from bakers, but all return no-data + while (not fetch_available_data_requests.empty()) { + auto &[peer_id, req, cb] = fetch_available_data_requests.front(); + cb(kagome::network::Empty{}); + fetch_available_data_requests.pop(); + } + + // Trying to do chunks recovery, but for tolerated number + // of first requests returned one the same chunk + while (not fetch_chunk_requests.empty()) { + auto &[peer_id, req, cb] = fetch_chunk_requests.front(); + const auto &ec_chunk = + original_chunks[req.chunk_index < tolerance_chunk_number + ? 0 + : req.chunk_index]; + Chunk chunk{ + .data = ec_chunk.chunk, + .chunk_index = ec_chunk.index, + .proof = ec_chunk.proof, + }; + cb(chunk); + fetch_chunk_requests.pop(); + } + + testing::Mock::VerifyAndClear(callback.get()); + + ASSERT_TRUE(available_data_res_opt.has_value()); +} + +TEST_F(RecoveryTest, FailureHandling) { + prepareAvailableData(2048); + + peer_state.req_chunk_version = kagome::network::ReqChunkVersion::V2; + std::optional backing_group = 0; + std::optional core = 0; + + std::optional> available_data_res_opt; + + EXPECT_CALL(*callback, Call(_)) + .WillOnce(WithArgs<0>( + [&](std::optional> x) mutable { + available_data_res_opt = std::move(x); + })); + + recovery->recover( + receipt, session_index, backing_group, core, callback->AsStdFunction()); + + // Trying to recover full from bakers, but all return no-data + while (not fetch_available_data_requests.empty()) { + auto &[peer_id, req, cb] = fetch_available_data_requests.front(); + cb(kagome::network::Empty{}); + fetch_available_data_requests.pop(); + } + + size_t handled_counter = 0; + + // Trying to do chunks recovery, but tolerated number of requests are failed + while (not fetch_chunk_requests.empty()) { + auto &[peer_id, req, cb] = 
fetch_chunk_requests.front(); + if (handled_counter++ < tolerance_chunk_number) { + cb(kagome::network::ProtocolError::GONE); + } else { + const auto &ec_chunk = original_chunks[req.chunk_index]; + Chunk chunk{ + .data = ec_chunk.chunk, + .chunk_index = ec_chunk.index, + .proof = ec_chunk.proof, + }; + cb(chunk); + } + fetch_chunk_requests.pop(); + } + + testing::Mock::VerifyAndClear(callback.get()); + + ASSERT_TRUE(available_data_res_opt.has_value()); +} + +TEST_F(RecoveryTest, LargeScaleRecovery) { + prepareAvailableData(2 << 20); // 2Mb + + std::optional backing_group = 0; // any non nullopt value + std::optional core = std::nullopt; // value doesn't matter + + std::optional> available_data_res_opt; + + EXPECT_CALL(*callback, Call(_)) + .WillOnce(WithArgs<0>( + [&](std::optional> x) mutable { + available_data_res_opt = std::move(x); + })); + + recovery->recover( + receipt, session_index, backing_group, core, callback->AsStdFunction()); + + // We must not try to obtain big data from backers + EXPECT_TRUE(fetch_available_data_requests.empty()); + + // Trying to do systematic chunks recovery + while (not fetch_chunk_requests.empty()) { + auto &[peer_id, req, cb] = fetch_chunk_requests.front(); + const auto &ec_chunk = original_chunks[req.chunk_index]; + Chunk chunk{ + .data = ec_chunk.chunk, + .chunk_index = ec_chunk.index, + .proof = ec_chunk.proof, + }; + cb(chunk); + fetch_chunk_requests.pop(); + } + + testing::Mock::VerifyAndClear(callback.get()); + + ASSERT_TRUE(available_data_res_opt.has_value()); + ASSERT_OUTCOME_SUCCESS(available_data, available_data_res_opt.value()); + ASSERT_EQ(available_data, original_available_data); +} diff --git a/test/core/parachain/candidates.cpp b/test/core/parachain/candidates.cpp new file mode 100644 index 0000000000..b414e965e5 --- /dev/null +++ b/test/core/parachain/candidates.cpp @@ -0,0 +1,632 @@ +/** + * Copyright Quadrivium LLC + * All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + */ + +#include 
"parachain/validator/impl/candidates.hpp" +#include "core/parachain/parachain_test_harness.hpp" + +using namespace kagome::parachain; + +class CandidatesTest : public ProspectiveParachainsTestHarness { + void SetUp() override { + ProspectiveParachainsTestHarness::SetUp(); + } + + void TearDown() override { + ProspectiveParachainsTestHarness::TearDown(); + } + + public: + template + inline Hash hash_of(const T &t) { + return hasher_->blake2b_256(scale::encode(std::forward(t)).value()); + } + + inline Hash hash_of(const HeadData &t) { + return hasher_->blake2b_256(t); + } + + inline Hash hash_of(const network::CommittedCandidateReceipt &t) { + return hash(t); + } + + libp2p::PeerId getPeerFrom(uint64_t i) { + const auto s = fmt::format("Peer#{}", i); + libp2p::PeerId peer_id = operator""_peerid(s.data(), s.size()); + return peer_id; + } + + Hash from_low_u64_be(uint64_t v) { + Hash h{}; + const uint64_t value = LE_BE_SWAP64(v); + for (size_t i = 0; i < 8; ++i) { + h[32 - 8 + i] = ((value >> (i * 8)) & 0xff); + } + return h; + } +}; + +TEST_F(CandidatesTest, inserting_unconfirmed_rejects_on_incompatible_claims) { + const HeadData relay_head_data_a = {1, 2, 3}; + const HeadData relay_head_data_b = {4, 5, 6}; + + const auto relay_hash_a = hash_of(relay_head_data_a); + const auto relay_hash_b = hash_of(relay_head_data_b); + + const ParachainId para_id_a = 1; + const ParachainId para_id_b = 2; + + const auto &[candidate_a, pvd_a] = make_candidate(relay_hash_a, + 1, + para_id_a, + relay_head_data_a, + {1}, + from_low_u64_be(1000)); + + const auto candidate_hash_a = hash(candidate_a); + + const auto peer = getPeerFrom(1); + + const GroupIndex group_index_a = 100; + const GroupIndex group_index_b = 200; + + Candidates candidates; + + // Confirm a candidate first. + candidates.confirm_candidate( + candidate_hash_a, candidate_a, pvd_a, group_index_a, hasher_); + + // Relay parent does not match. 
+ ASSERT_EQ( + candidates.insert_unconfirmed(peer, + candidate_hash_a, + relay_hash_b, + group_index_a, + std::make_pair(relay_hash_a, para_id_a)), + false); + + // Group index does not match. + ASSERT_EQ( + candidates.insert_unconfirmed(peer, + candidate_hash_a, + relay_hash_a, + group_index_b, + std::make_pair(relay_hash_a, para_id_a)), + false); + + // Parent head data does not match. + ASSERT_EQ( + candidates.insert_unconfirmed(peer, + candidate_hash_a, + relay_hash_a, + group_index_a, + std::make_pair(relay_hash_b, para_id_a)), + false); + + // Para ID does not match. + ASSERT_EQ( + candidates.insert_unconfirmed(peer, + candidate_hash_a, + relay_hash_a, + group_index_a, + std::make_pair(relay_hash_a, para_id_b)), + false); + + // Everything matches. + ASSERT_EQ( + candidates.insert_unconfirmed(peer, + candidate_hash_a, + relay_hash_a, + group_index_a, + std::make_pair(relay_hash_a, para_id_a)), + true); +} + +TEST_F(CandidatesTest, confirming_maintains_parent_hash_index) { + const HeadData relay_head_data = {1, 2, 3}; + const auto relay_hash = hash_of(relay_head_data); + + const HeadData candidate_head_data_a = {1}; + const HeadData candidate_head_data_b = {2}; + const HeadData candidate_head_data_c = {3}; + const HeadData candidate_head_data_d = {4}; + + const auto candidate_head_data_hash_a = hash_of(candidate_head_data_a); + const auto candidate_head_data_hash_b = hash_of(candidate_head_data_b); + const auto candidate_head_data_hash_c = hash_of(candidate_head_data_c); + + const auto &[candidate_a, pvd_a] = make_candidate(relay_hash, + 1, + 1, + relay_head_data, + candidate_head_data_a, + from_low_u64_be(1000)); + const auto &[candidate_b, pvd_b] = make_candidate(relay_hash, + 1, + 1, + candidate_head_data_a, + candidate_head_data_b, + from_low_u64_be(2000)); + const auto &[candidate_c, _] = make_candidate(relay_hash, + 1, + 1, + candidate_head_data_b, + candidate_head_data_c, + from_low_u64_be(3000)); + + const auto &[candidate_d, pvd_d] = 
make_candidate(relay_hash, + 1, + 1, + candidate_head_data_c, + candidate_head_data_d, + from_low_u64_be(4000)); + + const auto candidate_hash_a = hash_of(candidate_a); + const auto candidate_hash_b = hash_of(candidate_b); + const auto candidate_hash_c = hash_of(candidate_c); + const auto candidate_hash_d = hash_of(candidate_d); + + const auto peer = getPeerFrom(1); + const GroupIndex group_index = 100; + + Candidates candidates; + + // Insert some unconfirmed candidates. + + // Advertise A without parent hash. + ASSERT_TRUE(candidates.insert_unconfirmed( + peer, candidate_hash_a, relay_hash, group_index, std::nullopt)); + + ASSERT_EQ(candidates.by_parent, Candidates::ByRelayParent{}); + + // Advertise A with parent hash and ID. + ASSERT_TRUE(candidates.insert_unconfirmed(peer, + candidate_hash_a, + relay_hash, + group_index, + std::make_pair(relay_hash, 1))); + + auto ASSERT_RESULT = [&](const Candidates::ByRelayParent &ref) -> auto { + ASSERT_EQ(candidates.by_parent, ref); + }; + + ASSERT_RESULT({{relay_hash, {{1, {candidate_hash_a}}}}}); + + // Advertise B with parent A. + ASSERT_TRUE(candidates.insert_unconfirmed( + peer, + candidate_hash_b, + relay_hash, + group_index, + std::make_pair(candidate_head_data_hash_a, 1))); + ASSERT_RESULT({{relay_hash, {{1, {candidate_hash_a}}}}, + {candidate_head_data_hash_a, {{1, {candidate_hash_b}}}}}); + + // Advertise C with parent A. + ASSERT_TRUE(candidates.insert_unconfirmed( + peer, + candidate_hash_c, + relay_hash, + group_index, + std::make_pair(candidate_head_data_hash_a, 1))); + ASSERT_RESULT({{relay_hash, {{1, {candidate_hash_a}}}}, + {candidate_head_data_hash_a, + {{1, {candidate_hash_b, candidate_hash_c}}}}}); + + // Advertise D with parent A. 
+ ASSERT_TRUE(candidates.insert_unconfirmed( + peer, + candidate_hash_d, + relay_hash, + group_index, + std::make_pair(candidate_head_data_hash_a, 1))); + ASSERT_RESULT( + {{relay_hash, {{1, {candidate_hash_a}}}}, + {candidate_head_data_hash_a, + {{1, {candidate_hash_b, candidate_hash_c, candidate_hash_d}}}}}); + + // Insert confirmed candidates and check parent hash index. + + // Confirmation matches advertisement. Index should be unchanged. + candidates.confirm_candidate( + candidate_hash_a, candidate_a, pvd_a, group_index, hasher_); + ASSERT_RESULT( + {{relay_hash, {{1, {candidate_hash_a}}}}, + {candidate_head_data_hash_a, + {{1, {candidate_hash_b, candidate_hash_c, candidate_hash_d}}}}}); + + candidates.confirm_candidate( + candidate_hash_b, candidate_b, pvd_b, group_index, hasher_); + ASSERT_RESULT( + {{relay_hash, {{1, {candidate_hash_a}}}}, + {candidate_head_data_hash_a, + {{1, {candidate_hash_b, candidate_hash_c, candidate_hash_d}}}}}); + + // Confirmation does not match advertisement. Index should be updated. + candidates.confirm_candidate( + candidate_hash_d, candidate_d, pvd_d, group_index, hasher_); + ASSERT_RESULT({{relay_hash, {{1, {candidate_hash_a}}}}, + {candidate_head_data_hash_a, + {{1, {candidate_hash_b, candidate_hash_c}}}}, + {candidate_head_data_hash_c, {{1, {candidate_hash_d}}}}}); + + // Make a new candidate for C with a different para ID. 
+ const auto &[new_candidate_c, new_pvd_c] = + make_candidate(relay_hash, + 1, + 2, + candidate_head_data_b, + candidate_head_data_c, + from_low_u64_be(3000)); + candidates.confirm_candidate( + candidate_hash_c, new_candidate_c, new_pvd_c, group_index, hasher_); + ASSERT_RESULT({{relay_hash, {{1, {candidate_hash_a}}}}, + {candidate_head_data_hash_a, {{1, {candidate_hash_b}}}}, + {candidate_head_data_hash_b, {{2, {candidate_hash_c}}}}, + {candidate_head_data_hash_c, {{1, {candidate_hash_d}}}}}); +} + +TEST_F(CandidatesTest, test_returned_post_confirmation) { + const HeadData relay_head_data = {1, 2, 3}; + const auto relay_hash = hash_of(relay_head_data); + + const HeadData candidate_head_data_a = {1}; + const HeadData candidate_head_data_b = {2}; + const HeadData candidate_head_data_c = {3}; + const HeadData candidate_head_data_d = {4}; + + const auto candidate_head_data_hash_a = hash_of(candidate_head_data_a); + const auto candidate_head_data_hash_b = hash_of(candidate_head_data_b); + + const auto &[candidate_a, pvd_a] = make_candidate(relay_hash, + 1, + 1, + relay_head_data, + candidate_head_data_a, + from_low_u64_be(1000)); + const auto &[candidate_b, pvd_b] = make_candidate(relay_hash, + 1, + 1, + candidate_head_data_a, + candidate_head_data_b, + from_low_u64_be(2000)); + const auto &[candidate_c, _] = make_candidate(relay_hash, + 1, + 1, + candidate_head_data_a, + candidate_head_data_c, + from_low_u64_be(3000)); + const auto &[candidate_d, pvd_d] = make_candidate(relay_hash, + 1, + 1, + candidate_head_data_b, + candidate_head_data_d, + from_low_u64_be(4000)); + + const auto candidate_hash_a = hash_of(candidate_a); + const auto candidate_hash_b = hash_of(candidate_b); + const auto candidate_hash_c = hash_of(candidate_c); + const auto candidate_hash_d = hash_of(candidate_d); + + const auto peer_a = getPeerFrom(1); + const auto peer_b = getPeerFrom(2); + const auto peer_c = getPeerFrom(3); + const auto peer_d = getPeerFrom(4); + + const GroupIndex group_index = 
100; + + Candidates candidates; + + // Insert some unconfirmed candidates. + + // Advertise A without parent hash. + ASSERT_TRUE(candidates.insert_unconfirmed( + peer_a, candidate_hash_a, relay_hash, group_index, std::nullopt)); + + // Advertise A with parent hash and ID. + ASSERT_TRUE(candidates.insert_unconfirmed(peer_a, + candidate_hash_a, + relay_hash, + group_index, + std::make_pair(relay_hash, 1))); + + // (Correctly) advertise B with parent A. Do it from a couple of peers. + ASSERT_TRUE(candidates.insert_unconfirmed( + peer_a, + candidate_hash_b, + relay_hash, + group_index, + std::make_pair(candidate_head_data_hash_a, 1))); + ASSERT_TRUE(candidates.insert_unconfirmed( + peer_b, + candidate_hash_b, + relay_hash, + group_index, + std::make_pair(candidate_head_data_hash_a, 1))); + + // (Wrongly) advertise C with parent A. Do it from a couple peers. + ASSERT_TRUE(candidates.insert_unconfirmed( + peer_b, + candidate_hash_c, + relay_hash, + group_index, + std::make_pair(candidate_head_data_hash_a, 1))); + ASSERT_TRUE(candidates.insert_unconfirmed( + peer_c, + candidate_hash_c, + relay_hash, + group_index, + std::make_pair(candidate_head_data_hash_a, 1))); + + // Advertise D. Do it correctly from one peer (parent B) and wrongly from + // another (parent A). + ASSERT_TRUE(candidates.insert_unconfirmed( + peer_c, + candidate_hash_d, + relay_hash, + group_index, + std::make_pair(candidate_head_data_hash_b, 1))); + ASSERT_TRUE(candidates.insert_unconfirmed( + peer_d, + candidate_hash_d, + relay_hash, + group_index, + std::make_pair(candidate_head_data_hash_a, 1))); + + auto ASSERT_RESULT = [&](const Candidates::ByRelayParent &ref) -> auto { + ASSERT_EQ(candidates.by_parent, ref); + }; + + ASSERT_RESULT( + {{relay_hash, {{1, {candidate_hash_a}}}}, + {candidate_head_data_hash_a, + {{1, {candidate_hash_b, candidate_hash_c, candidate_hash_d}}}}, + {candidate_head_data_hash_b, {{1, {candidate_hash_d}}}}}); + + // Insert confirmed candidates and check parent hash index. 
+ + // Confirmation matches advertisement. + { + const auto post_confirmation = candidates.confirm_candidate( + candidate_hash_a, candidate_a, pvd_a, group_index, hasher_); + ASSERT_EQ(post_confirmation, + std::make_optional(PostConfirmation{ + .hypothetical = + HypotheticalCandidateComplete{ + .candidate_hash = candidate_hash_a, + .receipt = candidate_a, + .persisted_validation_data = pvd_a, + }, + .reckoning = + PostConfirmationReckoning{ + .correct = {peer_a}, + .incorrect = {}, + }, + })); + } + + { + const auto post_confirmation = candidates.confirm_candidate( + candidate_hash_b, candidate_b, pvd_b, group_index, hasher_); + ASSERT_EQ(post_confirmation, + std::make_optional(PostConfirmation{ + .hypothetical = + HypotheticalCandidateComplete{ + .candidate_hash = candidate_hash_b, + .receipt = candidate_b, + .persisted_validation_data = pvd_b, + }, + .reckoning = + PostConfirmationReckoning{ + .correct = {peer_a, peer_b}, + .incorrect = {}, + }, + })); + } + + // Confirm candidate with two wrong peers (different group index). + const auto &[new_candidate_c, new_pvd_c] = + make_candidate(relay_hash, + 1, + 2, + candidate_head_data_b, + candidate_head_data_c, + from_low_u64_be(3000)); + + { + const auto post_confirmation = candidates.confirm_candidate( + candidate_hash_c, new_candidate_c, new_pvd_c, group_index, hasher_); + ASSERT_EQ(post_confirmation, + std::make_optional(PostConfirmation{ + .hypothetical = + HypotheticalCandidateComplete{ + .candidate_hash = candidate_hash_c, + .receipt = new_candidate_c, + .persisted_validation_data = new_pvd_c, + }, + .reckoning = + PostConfirmationReckoning{ + .correct = {}, + .incorrect = {peer_b, peer_c}, + }, + })); + } + + // Confirm candidate with one wrong peer (different parent head data). 
+ { + const auto post_confirmation = candidates.confirm_candidate( + candidate_hash_d, candidate_d, pvd_d, group_index, hasher_); + ASSERT_EQ(post_confirmation, + std::make_optional(PostConfirmation{ + .hypothetical = + HypotheticalCandidateComplete{ + .candidate_hash = candidate_hash_d, + .receipt = candidate_d, + .persisted_validation_data = pvd_d, + }, + .reckoning = + PostConfirmationReckoning{ + .correct = {peer_c}, + .incorrect = {peer_d}, + }, + })); + } +} + +TEST_F(CandidatesTest, test_hypothetical_frontiers) { + const HeadData relay_head_data = {1, 2, 3}; + const auto relay_hash = hash_of(relay_head_data); + + const HeadData candidate_head_data_a = {1}; + const HeadData candidate_head_data_b = {2}; + const HeadData candidate_head_data_c = {3}; + const HeadData candidate_head_data_d = {4}; + + const auto candidate_head_data_hash_a = hash_of(candidate_head_data_a); + const auto candidate_head_data_hash_b = hash_of(candidate_head_data_b); + const auto candidate_head_data_hash_d = hash_of(candidate_head_data_d); + + const auto &[candidate_a, pvd_a] = make_candidate(relay_hash, + 1, + 1, + relay_head_data, + candidate_head_data_a, + from_low_u64_be(1000)); + const auto &[candidate_b, _] = make_candidate(relay_hash, + 1, + 1, + candidate_head_data_a, + candidate_head_data_b, + from_low_u64_be(2000)); + const auto &[candidate_c, __] = make_candidate(relay_hash, + 1, + 1, + candidate_head_data_a, + candidate_head_data_c, + from_low_u64_be(3000)); + const auto &[candidate_d, ___] = make_candidate(relay_hash, + 1, + 1, + candidate_head_data_b, + candidate_head_data_d, + from_low_u64_be(4000)); + + const auto candidate_hash_a = hash_of(candidate_a); + const auto candidate_hash_b = hash_of(candidate_b); + const auto candidate_hash_c = hash_of(candidate_c); + const auto candidate_hash_d = hash_of(candidate_d); + + const auto peer = getPeerFrom(1); + const GroupIndex group_index = 100; + + Candidates candidates; + + // Confirm A. 
+ candidates.confirm_candidate( + candidate_hash_a, candidate_a, pvd_a, group_index, hasher_); + + // Advertise B with parent A. + ASSERT_TRUE(candidates.insert_unconfirmed( + peer, + candidate_hash_b, + relay_hash, + group_index, + std::make_pair(candidate_head_data_hash_a, 1))); + + // Advertise C with parent A. + ASSERT_TRUE(candidates.insert_unconfirmed( + peer, + candidate_hash_c, + relay_hash, + group_index, + std::make_pair(candidate_head_data_hash_a, 1))); + + // Advertise D with parent B. + ASSERT_TRUE(candidates.insert_unconfirmed( + peer, + candidate_hash_d, + relay_hash, + group_index, + std::make_pair(candidate_head_data_hash_b, 1))); + + auto ASSERT_RESULT = [&](const Candidates::ByRelayParent &ref) -> auto { + ASSERT_EQ(candidates.by_parent, ref); + }; + + ASSERT_RESULT({{relay_hash, {{1, {candidate_hash_a}}}}, + {candidate_head_data_hash_a, + {{1, {candidate_hash_b, candidate_hash_c}}}}, + {candidate_head_data_hash_b, {{1, {candidate_hash_d}}}}}); + + const HypotheticalCandidateComplete hypothetical_a{ + .candidate_hash = candidate_hash_a, + .receipt = candidate_a, + .persisted_validation_data = pvd_a, + }; + const HypotheticalCandidateIncomplete hypothetical_b{ + .candidate_hash = candidate_hash_b, + .candidate_para = 1, + .parent_head_data_hash = candidate_head_data_hash_a, + .candidate_relay_parent = relay_hash, + }; + const HypotheticalCandidateIncomplete hypothetical_c{ + .candidate_hash = candidate_hash_c, + .candidate_para = 1, + .parent_head_data_hash = candidate_head_data_hash_a, + .candidate_relay_parent = relay_hash, + }; + const HypotheticalCandidateIncomplete hypothetical_d{ + .candidate_hash = candidate_hash_d, + .candidate_para = 1, + .parent_head_data_hash = candidate_head_data_hash_b, + .candidate_relay_parent = relay_hash, + }; + + auto CONTAINS = [](const std::vector &a, + const HypotheticalCandidate &v) -> bool { + for (const auto &ai : a) { + if (ai == v) { + return true; + } + } + return false; + }; + + { + const auto 
hypotheticals = + candidates.frontier_hypotheticals(std::make_pair(relay_hash, 1)); + ASSERT_EQ(hypotheticals.size(), 1); + ASSERT_TRUE(hypotheticals[0] == HypotheticalCandidate(hypothetical_a)); + } + + { + const auto hypotheticals = candidates.frontier_hypotheticals( + std::make_pair(candidate_head_data_hash_a, 2)); + ASSERT_EQ(hypotheticals.size(), 0); + } + + { + const auto hypotheticals = candidates.frontier_hypotheticals( + std::make_pair(candidate_head_data_hash_a, 1)); + ASSERT_EQ(hypotheticals.size(), 2); + ASSERT_TRUE(CONTAINS(hypotheticals, hypothetical_b)); + ASSERT_TRUE(CONTAINS(hypotheticals, hypothetical_c)); + } + + { + const auto hypotheticals = candidates.frontier_hypotheticals( + std::make_pair(candidate_head_data_hash_d, 1)); + ASSERT_EQ(hypotheticals.size(), 0); + } + + { + const auto hypotheticals = candidates.frontier_hypotheticals(std::nullopt); + ASSERT_EQ(hypotheticals.size(), 4); + ASSERT_TRUE(CONTAINS(hypotheticals, hypothetical_a)); + ASSERT_TRUE(CONTAINS(hypotheticals, hypothetical_b)); + ASSERT_TRUE(CONTAINS(hypotheticals, hypothetical_c)); + ASSERT_TRUE(CONTAINS(hypotheticals, hypothetical_d)); + } +} diff --git a/test/core/parachain/parachain_test_harness.hpp b/test/core/parachain/parachain_test_harness.hpp index 7957936b46..fdc71a7713 100644 --- a/test/core/parachain/parachain_test_harness.hpp +++ b/test/core/parachain/parachain_test_harness.hpp @@ -144,6 +144,116 @@ class ProspectiveParachainsTestHarness : public testing::Test { std::move(candidate)); } + HeadData dummy_head_data() { + return {}; + } + + network::CandidateCommitments dummy_candidate_commitments( + const std::optional &head_data) { + return network::CandidateCommitments{ + .upward_msgs = {}, + .outbound_hor_msgs = {}, + .opt_para_runtime = std::nullopt, + .para_head = (head_data ? *head_data : dummy_head_data()), + .downward_msgs_count = 0, + .watermark = 0, + }; + } + + /// Create meaningless validation code. 
+ runtime::ValidationCode dummy_validation_code() { + return {1, 2, 3, 4, 5, 6, 7, 8, 9}; + } + + network::CandidateDescriptor dummy_candidate_descriptor_bad_sig( + const Hash &relay_parent) { + return network::CandidateDescriptor{ + .para_id = 0, + .relay_parent = relay_parent, + .reserved_1 = {}, + .persisted_data_hash = fromNumber(0), + .pov_hash = fromNumber(0), + .erasure_encoding_root = fromNumber(0), + .reserved_2 = {}, + .para_head_hash = fromNumber(0), + .validation_code_hash = + crypto::Hashed>( + dummy_validation_code()) + .getHash(), + }; + } + + network::CandidateReceipt dummy_candidate_receipt_bad_sig( + const Hash &relay_parent, const std::optional &commitments) { + const auto commitments_hash = [&]() -> Hash { + if (commitments) { + return *commitments; + } + return crypto::Hashed>( + dummy_candidate_commitments(dummy_head_data())) + .getHash(); + }(); + + network::CandidateReceipt receipt; + receipt.descriptor = dummy_candidate_descriptor_bad_sig(relay_parent); + receipt.commitments_hash = commitments_hash; + return receipt; + } + + runtime::PersistedValidationData dummy_pvd(const HeadData &parent_head, + uint32_t relay_parent_number) { + return runtime::PersistedValidationData{ + .parent_head = parent_head, + .relay_parent_number = relay_parent_number, + .relay_parent_storage_root = {}, + .max_pov_size = MAX_POV_SIZE, + }; + } + + std::pair + make_candidate(const Hash &relay_parent_hash, + BlockNumber relay_parent_number, + ParachainId para_id, + const HeadData &parent_head, + const HeadData &head_data, + const ValidationCodeHash &validation_code_hash) { + const runtime::PersistedValidationData pvd = + dummy_pvd(parent_head, relay_parent_number); + network::CandidateCommitments commitments{ + .upward_msgs = {}, + .outbound_hor_msgs = {}, + .opt_para_runtime = std::nullopt, + .para_head = head_data, + .downward_msgs_count = 0, + .watermark = relay_parent_number, + }; + + auto candidate = dummy_candidate_receipt_bad_sig(relay_parent_hash, Hash{}); + 
candidate.commitments_hash = + crypto::Hashed>(commitments) + .getHash(); + candidate.descriptor.para_id = para_id; + candidate.descriptor.persisted_data_hash = + crypto::Hashed>(pvd) + .getHash(); + candidate.descriptor.validation_code_hash = validation_code_hash; + return std::make_pair( + network::CommittedCandidateReceipt{ + .descriptor = candidate.descriptor, + .commitments = commitments, + }, + pvd); + } + static Hash fromNumber(uint64_t n) { assert(n <= 255); Hash h{}; diff --git a/test/core/parachain/prospective_parachains.cpp b/test/core/parachain/prospective_parachains.cpp index c3d74c173c..16bd50a422 100644 --- a/test/core/parachain/prospective_parachains.cpp +++ b/test/core/parachain/prospective_parachains.cpp @@ -142,7 +142,7 @@ class ProspectiveParachainsTest : public ProspectiveParachainsTestHarness { .WillRepeatedly(Return(ClaimQueueSnapshot{test_state.claim_queue})); } - EXPECT_CALL(*block_tree_, getBlockHeader(hash)) + EXPECT_CALL(*block_tree_, tryGetBlockHeader(hash)) .WillRepeatedly(Return(header)); const BlockNumber min_min = [&, number = number]() -> BlockNumber { @@ -191,7 +191,7 @@ class ProspectiveParachainsTest : public ProspectiveParachainsTestHarness { .digest = {}, .hash_opt = {}, }; - EXPECT_CALL(*block_tree_, getBlockHeader(h_)).WillRepeatedly(Return(h)); + EXPECT_CALL(*block_tree_, tryGetBlockHeader(h_)).WillRepeatedly(Return(h)); EXPECT_CALL(*parachain_api_, session_index_for_child(h_)) .WillRepeatedly(Return(outcome::success(1))); used_relay_parents.emplace(h_); @@ -233,7 +233,7 @@ class ProspectiveParachainsTest : public ProspectiveParachainsTestHarness { .hash_opt = {}, }; EXPECT_CALL(*block_tree_, - getBlockHeader(pending.descriptor.relay_parent)) + tryGetBlockHeader(pending.descriptor.relay_parent)) .WillRepeatedly(Return(h)); used_relay_parents.emplace(pending.descriptor.relay_parent); } @@ -316,76 +316,6 @@ class ProspectiveParachainsTest : public ProspectiveParachainsTestHarness { std::move(parent_hash_fn)); } - 
runtime::PersistedValidationData dummy_pvd(const HeadData &parent_head, - uint32_t relay_parent_number) { - return runtime::PersistedValidationData{ - .parent_head = parent_head, - .relay_parent_number = relay_parent_number, - .relay_parent_storage_root = {}, - .max_pov_size = MAX_POV_SIZE, - }; - } - - network::CandidateCommitments dummy_candidate_commitments( - const std::optional &head_data) { - return network::CandidateCommitments{ - .upward_msgs = {}, - .outbound_hor_msgs = {}, - .opt_para_runtime = std::nullopt, - .para_head = (head_data ? *head_data : dummy_head_data()), - .downward_msgs_count = 0, - .watermark = 0, - }; - } - - /// Create meaningless validation code. - runtime::ValidationCode dummy_validation_code() { - return {1, 2, 3, 4, 5, 6, 7, 8, 9}; - } - - network::CandidateDescriptor dummy_candidate_descriptor_bad_sig( - const Hash &relay_parent) { - return network::CandidateDescriptor{ - .para_id = 0, - .relay_parent = relay_parent, - .reserved_1 = {}, - .persisted_data_hash = fromNumber(0), - .pov_hash = fromNumber(0), - .erasure_encoding_root = fromNumber(0), - .reserved_2 = {}, - .para_head_hash = fromNumber(0), - .validation_code_hash = - crypto::Hashed>( - dummy_validation_code()) - .getHash(), - }; - } - - HeadData dummy_head_data() { - return {}; - } - - network::CandidateReceipt dummy_candidate_receipt_bad_sig( - const Hash &relay_parent, const std::optional &commitments) { - const auto commitments_hash = [&]() -> Hash { - if (commitments) { - return *commitments; - } - return crypto::Hashed>( - dummy_candidate_commitments(dummy_head_data())) - .getHash(); - }(); - - network::CandidateReceipt receipt; - receipt.descriptor = dummy_candidate_descriptor_bad_sig(relay_parent); - receipt.commitments_hash = commitments_hash; - return receipt; - } - auto get_backable_candidates( const TestLeaf &leaf, ParachainId para_id, @@ -464,46 +394,6 @@ class ProspectiveParachainsTest : public ProspectiveParachainsTestHarness { ASSERT_TRUE(resp.has_value()); 
ASSERT_EQ(resp.value(), expected_pvd); } - - std::pair - make_candidate(const Hash &relay_parent_hash, - BlockNumber relay_parent_number, - ParachainId para_id, - const HeadData &parent_head, - const HeadData &head_data, - const ValidationCodeHash &validation_code_hash) { - const runtime::PersistedValidationData pvd = - dummy_pvd(parent_head, relay_parent_number); - network::CandidateCommitments commitments{ - .upward_msgs = {}, - .outbound_hor_msgs = {}, - .opt_para_runtime = std::nullopt, - .para_head = head_data, - .downward_msgs_count = 0, - .watermark = relay_parent_number, - }; - - auto candidate = dummy_candidate_receipt_bad_sig(relay_parent_hash, Hash{}); - candidate.commitments_hash = - crypto::Hashed>(commitments) - .getHash(); - candidate.descriptor.para_id = para_id; - candidate.descriptor.persisted_data_hash = - crypto::Hashed>(pvd) - .getHash(); - candidate.descriptor.validation_code_hash = validation_code_hash; - return std::make_pair( - network::CommittedCandidateReceipt{ - .descriptor = candidate.descriptor, - .commitments = commitments, - }, - pvd); - } }; TEST_F(ProspectiveParachainsTest, @@ -1978,7 +1868,7 @@ TEST_F(ProspectiveParachainsTest, uses_ancestry_only_within_session) { EXPECT_CALL(*parachain_api_, claim_queue(hash)) .WillRepeatedly(Return(ClaimQueueSnapshot{})); - EXPECT_CALL(*block_tree_, getBlockHeader(hash)) + EXPECT_CALL(*block_tree_, tryGetBlockHeader(hash)) .WillRepeatedly(Return(BlockHeader{ .number = number, .parent_hash = get_parent_hash(hash), @@ -1997,7 +1887,7 @@ TEST_F(ProspectiveParachainsTest, uses_ancestry_only_within_session) { for (size_t i = 0; i < ancestry_hashes.size(); ++i) { const auto &h = ancestry_hashes[i]; const BlockNumber n = number - (i + 1); - EXPECT_CALL(*block_tree_, getBlockHeader(h)) + EXPECT_CALL(*block_tree_, tryGetBlockHeader(h)) .WillRepeatedly(Return(BlockHeader{ .number = n, .parent_hash = get_parent_hash(h), diff --git a/test/core/runtime/CMakeLists.txt b/test/core/runtime/CMakeLists.txt index 
290486c4ed..93e349cde5 100644 --- a/test/core/runtime/CMakeLists.txt +++ b/test/core/runtime/CMakeLists.txt @@ -37,7 +37,6 @@ target_link_libraries(trie_storage_provider_test trie_storage_provider storage logger_for_tests - in_memory_storage ) addtest(uncompress_code_test @@ -86,7 +85,6 @@ target_link_libraries(runtime_upgrade_tracker_test logger log_configurator scale::scale - in_memory_storage ) addtest(instance_pool_test instance_pool_test.cpp) diff --git a/test/core/runtime/binaryen/CMakeLists.txt b/test/core/runtime/binaryen/CMakeLists.txt index 1f54c1a350..6f65ecf3bb 100644 --- a/test/core/runtime/binaryen/CMakeLists.txt +++ b/test/core/runtime/binaryen/CMakeLists.txt @@ -37,7 +37,6 @@ target_link_libraries(tagged_transaction_queue_test core_api_factory logger_for_tests filesystem - in_memory_storage ) addtest(block_builder_api_test @@ -50,7 +49,6 @@ target_link_libraries(block_builder_api_test core_api filesystem logger_for_tests - in_memory_storage ) addtest(binaryen_parachain_test @@ -66,7 +64,6 @@ target_link_libraries(binaryen_parachain_test hasher pbkdf2_provider network - in_memory_storage logger_for_tests ) @@ -81,7 +78,6 @@ target_link_libraries(metadata_test core_api core_api_factory logger_for_tests - in_memory_storage ) addtest(runtime_external_interface_test diff --git a/test/core/runtime/binaryen/metadata_test.cpp b/test/core/runtime/binaryen/metadata_test.cpp index df3a09d588..3bf9cabde9 100644 --- a/test/core/runtime/binaryen/metadata_test.cpp +++ b/test/core/runtime/binaryen/metadata_test.cpp @@ -19,6 +19,7 @@ using ::testing::_; using ::testing::Return; +using kagome::blockchain::BlockTreeMock; using kagome::primitives::BlockHeader; using kagome::primitives::BlockId; using kagome::primitives::BlockInfo; @@ -39,7 +40,7 @@ class MetadataTest : public BinaryenRuntimeTest { prepareEphemeralStorageExpects(); api_ = std::make_shared( - executor_, header_repo_, runtime_upgrade_tracker_); + executor_, block_tree_, runtime_upgrade_tracker_); } 
protected: @@ -55,9 +56,9 @@ class MetadataTest : public BinaryenRuntimeTest { */ TEST_F(MetadataTest, metadata) { BlockInfo info{42, "block_hash"_hash256}; - EXPECT_CALL(*header_repo_, getBlockHeader(info.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(info.hash)) .WillRepeatedly(Return(BlockHeader{info.number, {}, {}, {}, {}})); - EXPECT_CALL(*header_repo_, getNumberByHash(info.hash)) + EXPECT_CALL(*block_tree_, getNumberByHash(info.hash)) .WillOnce(Return(info.number)); EXPECT_CALL(*runtime_upgrade_tracker_, getLastCodeUpdateState(info)) .WillOnce(Return(info.hash)); diff --git a/test/core/runtime/binaryen/runtime_external_interface_test.cpp b/test/core/runtime/binaryen/runtime_external_interface_test.cpp index 5c4ceb40fc..430035b2c5 100644 --- a/test/core/runtime/binaryen/runtime_external_interface_test.cpp +++ b/test/core/runtime/binaryen/runtime_external_interface_test.cpp @@ -10,7 +10,6 @@ #include #include "crypto/key_store/key_type.hpp" -#include "mock/core/blockchain/block_header_repository_mock.hpp" #include "mock/core/host_api/host_api_factory_mock.hpp" #include "mock/core/host_api/host_api_mock.hpp" #include "mock/core/runtime/binaryen_wasm_memory_factory_mock.hpp" @@ -27,7 +26,6 @@ using ::testing::_; using ::testing::Invoke; using ::testing::Return; -using kagome::blockchain::BlockHeaderRepositoryMock; using kagome::crypto::KeyType; using kagome::crypto::KeyTypes; using kagome::host_api::HostApi; @@ -88,7 +86,6 @@ class REITest : public ::testing::Test { core_api_factory_ = std::make_shared(); memory_provider_ = std::make_shared(); auto module_repo = std::make_shared(); - auto header_repo = std::make_shared(); } void executeWasm(std::string call_code) { diff --git a/test/core/runtime/executor_test.cpp b/test/core/runtime/executor_test.cpp index 06cdacae17..bf666e2a9a 100644 --- a/test/core/runtime/executor_test.cpp +++ b/test/core/runtime/executor_test.cpp @@ -9,7 +9,7 @@ #include #include "filesystem/common.hpp" -#include 
"mock/core/blockchain/block_header_repository_mock.hpp" +#include "mock/core/blockchain/block_tree_mock.hpp" #include "mock/core/host_api/host_api_mock.hpp" #include "mock/core/runtime/memory_provider_mock.hpp" #include "mock/core/runtime/module_instance_mock.hpp" @@ -25,8 +25,7 @@ #include "testutil/runtime/common/basic_code_provider.hpp" #include "testutil/runtime/memory.hpp" -using kagome::blockchain::BlockHeaderRepository; -using kagome::blockchain::BlockHeaderRepositoryMock; +using kagome::blockchain::BlockTreeMock; using kagome::common::Buffer; using kagome::host_api::HostApiMock; using kagome::runtime::BasicCodeProvider; @@ -55,7 +54,7 @@ class ExecutorTest : public testing::Test { } void SetUp() override { - header_repo_ = std::make_shared(); + block_tree_ = std::make_shared(); auto code_provider = std::make_shared( kagome::filesystem::path(__FILE__).parent_path().string() @@ -73,7 +72,7 @@ class ExecutorTest : public testing::Test { storage_ = std::make_shared(); ctx_factory_ = std::make_shared( - module_repo_, header_repo_); + module_repo_, block_tree_); } enum class CallType { Persistent, Ephemeral }; @@ -84,7 +83,7 @@ class ExecutorTest : public testing::Test { CallType type, const Buffer &encoded_args, int res) { - EXPECT_CALL(*header_repo_, getBlockHeader(blockchain_state.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(blockchain_state.hash)) .WillRepeatedly(Return(kagome::primitives::BlockHeader{ blockchain_state.number, // number {}, // parent @@ -139,9 +138,9 @@ class ExecutorTest : public testing::Test { protected: TestMemory memory_; + std::shared_ptr block_tree_; std::shared_ptr ctx_factory_; std::shared_ptr cache_; - std::shared_ptr header_repo_; std::shared_ptr storage_; std::shared_ptr module_repo_; }; diff --git a/test/core/runtime/runtime_test_base.hpp b/test/core/runtime/runtime_test_base.hpp index 08c37f7284..4c4d61b22f 100644 --- a/test/core/runtime/runtime_test_base.hpp +++ b/test/core/runtime/runtime_test_base.hpp @@ -26,8 +26,7 @@ 
#include "host_api/impl/host_api_factory_impl.hpp" #include "mock/core/application/app_configuration_mock.hpp" #include "mock/core/application/app_state_manager_mock.hpp" -#include "mock/core/blockchain/block_header_repository_mock.hpp" -#include "mock/core/blockchain/block_storage_mock.hpp" +#include "mock/core/blockchain/block_tree_mock.hpp" #include "mock/core/offchain/offchain_persistent_storage_mock.hpp" #include "mock/core/offchain/offchain_worker_pool_mock.hpp" #include "mock/core/runtime/runtime_properties_cache_mock.hpp" @@ -49,10 +48,10 @@ #include "runtime/module.hpp" #include "runtime/runtime_context.hpp" #include "runtime/wabt/instrument.hpp" +#include "storage/in_memory/in_memory_storage.hpp" #include "testutil/literals.hpp" #include "testutil/outcome.hpp" #include "testutil/runtime/common/basic_code_provider.hpp" -#include "testutil/storage/in_memory/in_memory_storage.hpp" using kagome::application::AppConfigurationMock; using kagome::runtime::RuntimeInstancesPoolImpl; @@ -136,12 +135,12 @@ class RuntimeTestBaseImpl { offchain_storage_, offchain_worker_pool_); - header_repo_ = std::make_shared< - testing::NiceMock>(); + block_tree_ = + std::make_shared>(); - ON_CALL(*header_repo_, getHashByNumber(0)) + ON_CALL(*block_tree_, getHashByNumber(0)) .WillByDefault(testing::Return("genesis_hash"_hash256)); - EXPECT_CALL(*header_repo_, getBlockHeader("genesis_hash"_hash256)) + EXPECT_CALL(*block_tree_, getBlockHeader("genesis_hash"_hash256)) .WillRepeatedly(testing::Return(primitives::BlockHeader{ 0, // number {}, // parent_hash @@ -179,10 +178,9 @@ class RuntimeTestBaseImpl { std::shared_ptr upgrade_tracker = runtime::RuntimeUpgradeTrackerImpl::create( - header_repo_, spaced_storage, std::make_shared(), - std::make_shared()) + block_tree_) .value(); auto wasm_cache_dir = @@ -197,14 +195,14 @@ class RuntimeTestBaseImpl { auto module_repo = std::make_shared(instance_pool_, hasher_, - header_repo_, + block_tree_, upgrade_tracker, trie_storage_, module_factory, 
wasm_provider_); ctx_factory_ = std::make_shared( - module_repo, header_repo_); + module_repo, block_tree_); executor_ = std::make_shared(ctx_factory_, cache_); } @@ -257,14 +255,14 @@ class RuntimeTestBaseImpl { Digest digest{}; - ON_CALL(*header_repo_, getHashByNumber(number)) + ON_CALL(*block_tree_, getHashByNumber(number)) .WillByDefault(testing::Return(hash)); - ON_CALL(*header_repo_, getNumberByHash(hash)) + ON_CALL(*block_tree_, getNumberByHash(hash)) .WillByDefault(testing::Return(number)); BlockHeader header{ number, parent_hash, state_root, extrinsics_root, digest}; - EXPECT_CALL(*header_repo_, getBlockHeader(hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(hash)) .WillRepeatedly(testing::Return(header)); return header; } @@ -280,8 +278,7 @@ class RuntimeTestBaseImpl { protected: AppConfigurationMock app_config_; - std::shared_ptr> - header_repo_; + std::shared_ptr> block_tree_; std::shared_ptr wasm_provider_; std::shared_ptr trie_storage_; std::shared_ptr serializer_; diff --git a/test/core/runtime/runtime_upgrade_tracker_test.cpp b/test/core/runtime/runtime_upgrade_tracker_test.cpp index ca9435c7c7..acc90fe7d8 100644 --- a/test/core/runtime/runtime_upgrade_tracker_test.cpp +++ b/test/core/runtime/runtime_upgrade_tracker_test.cpp @@ -8,15 +8,12 @@ #include -#include "mock/core/blockchain/block_header_repository_mock.hpp" -#include "mock/core/blockchain/block_storage_mock.hpp" #include "mock/core/blockchain/block_tree_mock.hpp" #include "mock/core/storage/spaced_storage_mock.hpp" -#include "storage/buffer_map_types.hpp" +#include "storage/in_memory/in_memory_storage.hpp" #include "testutil/literals.hpp" #include "testutil/outcome.hpp" #include "testutil/prepare_loggers.hpp" -#include "testutil/storage/in_memory/in_memory_storage.hpp" using kagome::common::Hash256; using std::string_literals::operator""s; @@ -60,8 +57,6 @@ class RuntimeUpgradeTrackerTest : public testing::Test { } void SetUp() override { - header_repo_ = - std::make_shared(); block_tree_ 
= std::make_shared(); buffer_storage_ = std::make_shared(); storage_ = std::make_shared(); @@ -73,25 +68,21 @@ class RuntimeUpgradeTrackerTest : public testing::Test { std::make_shared(); sub_engine_ = std::make_shared(); - block_storage_ = std::make_shared(); - tracker_ = - kagome::runtime::RuntimeUpgradeTrackerImpl::create( - header_repo_, storage_, known_code_substitutes_, block_storage_) - .value(); + tracker_ = kagome::runtime::RuntimeUpgradeTrackerImpl::create( + storage_, known_code_substitutes_, block_tree_) + .value(); } protected: std::unique_ptr tracker_; - std::shared_ptr header_repo_; std::shared_ptr block_tree_; std::shared_ptr sub_engine_; - std::shared_ptr buffer_storage_; + std::shared_ptr buffer_storage_; std::shared_ptr storage_; std::shared_ptr known_code_substitutes_{}; - std::shared_ptr block_storage_; kagome::primitives::BlockInfo genesis_block{0, "block_genesis_hash"_hash256}; kagome::primitives::BlockHeader genesis_block_header{ 0, // number @@ -114,7 +105,7 @@ class RuntimeUpgradeTrackerTest : public testing::Test { * THEN first encountered state is returned */ TEST_F(RuntimeUpgradeTrackerTest, NullBlockTree) { - EXPECT_CALL(*header_repo_, getBlockHeader({block_42.hash})) + EXPECT_CALL(*block_tree_, getBlockHeader({block_42.hash})) .WillOnce(testing::Return(block_42_header)); EXPECT_OUTCOME_TRUE(state, tracker_->getLastCodeUpdateState(block_42)); ASSERT_EQ(state, block_42_header.state_root); @@ -127,10 +118,11 @@ TEST_F(RuntimeUpgradeTrackerTest, NullBlockTree) { * returned */ TEST_F(RuntimeUpgradeTrackerTest, EmptyUpdatesCache) { - tracker_->subscribeToBlockchainEvents(sub_engine_, block_tree_); + tracker_->subscribeToBlockchainEvents(sub_engine_); - EXPECT_CALL(*header_repo_, getBlockHeader(block_42.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block_42.hash)) .WillOnce(testing::Return(block_42_header)); + EXPECT_OUTCOME_TRUE(state, tracker_->getLastCodeUpdateState(block_42)); ASSERT_EQ(state, block_42_header.state_root); } @@ -141,9 
+133,9 @@ TEST_F(RuntimeUpgradeTrackerTest, EmptyUpdatesCache) { * THEN genesis state is returned */ TEST_F(RuntimeUpgradeTrackerTest, AutoUpgradeAfterEmpty) { - tracker_->subscribeToBlockchainEvents(sub_engine_, block_tree_); + tracker_->subscribeToBlockchainEvents(sub_engine_); - EXPECT_CALL(*header_repo_, getBlockHeader(block_2.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block_2.hash)) .WillRepeatedly(testing::Return(block_2_header)); EXPECT_OUTCOME_TRUE(state, tracker_->getLastCodeUpdateState(block_2)); ASSERT_EQ(state, block_2_header.state_root); @@ -156,12 +148,12 @@ TEST_F(RuntimeUpgradeTrackerTest, AutoUpgradeAfterEmpty) { } TEST_F(RuntimeUpgradeTrackerTest, CorrectUpgradeScenario) { - tracker_->subscribeToBlockchainEvents(sub_engine_, block_tree_); + tracker_->subscribeToBlockchainEvents(sub_engine_); EXPECT_CALL(*block_tree_, getLastFinalized()) .WillRepeatedly(testing::Return(makeBlockInfo(100500))); // first we execute block #1 - EXPECT_CALL(*header_repo_, getBlockHeader(genesis_block.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(genesis_block.hash)) .WillRepeatedly(testing::Return(genesis_block_header)); EXPECT_OUTCOME_TRUE(state1, tracker_->getLastCodeUpdateState(genesis_block)); @@ -170,9 +162,9 @@ TEST_F(RuntimeUpgradeTrackerTest, CorrectUpgradeScenario) { // then we upgrade in block #42 auto block_41_header = makeBlockHeader(41); auto block_41 = makeBlockInfo(41); - EXPECT_CALL(*header_repo_, getBlockHeader(genesis_block.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(genesis_block.hash)) .WillRepeatedly(testing::Return(genesis_block_header)); - EXPECT_CALL(*header_repo_, getBlockHeader(block_42.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block_42.hash)) .WillRepeatedly(testing::Return(block_42_header)); EXPECT_OUTCOME_TRUE(state42, tracker_->getLastCodeUpdateState(block_41)); @@ -188,7 +180,7 @@ TEST_F(RuntimeUpgradeTrackerTest, CorrectUpgradeScenario) { EXPECT_CALL(*block_tree_, getChildren(block_41.hash)) 
.WillRepeatedly(testing::Return( std::vector{block_42.hash})); - EXPECT_CALL(*header_repo_, getBlockHeader(block_42.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block_42.hash)) .WillRepeatedly(testing::Return(block_42_header)); EXPECT_OUTCOME_TRUE(state43, tracker_->getLastCodeUpdateState(block_42)); @@ -209,10 +201,10 @@ TEST_F(RuntimeUpgradeTrackerTest, CodeSubstituteAndStore) { EXPECT_CALL(*block_tree_, getLastFinalized()) .WillRepeatedly(testing::Return(makeBlockInfo(5203205))); - tracker_->subscribeToBlockchainEvents(sub_engine_, block_tree_); + tracker_->subscribeToBlockchainEvents(sub_engine_); auto block1 = makeBlockInfo(5200000); // took a block before code update!!! auto block1_header = makeBlockHeader(5200000); - EXPECT_CALL(*header_repo_, getBlockHeader(block1.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block1.hash)) .WillRepeatedly(testing::Return(block1_header)); sub_engine_->notify( kagome::primitives::events::ChainEventType::kNewRuntime, @@ -220,27 +212,25 @@ TEST_F(RuntimeUpgradeTrackerTest, CodeSubstituteAndStore) { auto block2 = makeBlockInfo(5203203); auto block2_header = makeBlockHeader(5203203); - EXPECT_CALL(*header_repo_, getBlockHeader(block2.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block2.hash)) .WillRepeatedly(testing::Return(block2_header)); known_code_substitutes_.reset( new kagome::primitives::CodeSubstituteBlockIds{{block2.number}}); // reset tracker - tracker_ = - kagome::runtime::RuntimeUpgradeTrackerImpl::create( - header_repo_, storage_, known_code_substitutes_, block_storage_) - .value(); - tracker_->subscribeToBlockchainEvents(sub_engine_, block_tree_); + tracker_ = kagome::runtime::RuntimeUpgradeTrackerImpl::create( + storage_, known_code_substitutes_, block_tree_) + .value(); + tracker_->subscribeToBlockchainEvents(sub_engine_); EXPECT_OUTCOME_TRUE(state2, tracker_->getLastCodeUpdateState(block2)); ASSERT_EQ(state2, block2_header.state_root); // reset tracker - tracker_ = - 
kagome::runtime::RuntimeUpgradeTrackerImpl::create( - header_repo_, storage_, known_code_substitutes_, block_storage_) - .value(); - tracker_->subscribeToBlockchainEvents(sub_engine_, block_tree_); + tracker_ = kagome::runtime::RuntimeUpgradeTrackerImpl::create( + storage_, known_code_substitutes_, block_tree_) + .value(); + tracker_->subscribeToBlockchainEvents(sub_engine_); auto block3 = makeBlockInfo(5203204); EXPECT_OUTCOME_TRUE(state3, tracker_->getLastCodeUpdateState(block3)); @@ -253,25 +243,24 @@ TEST_F(RuntimeUpgradeTrackerTest, UpgradeAfterCodeSubstitute) { EXPECT_CALL(*block_tree_, hasDirectChain(testing::_, testing::_)) .WillRepeatedly(testing::Return(true)); - tracker_ = - kagome::runtime::RuntimeUpgradeTrackerImpl::create( - header_repo_, storage_, known_code_substitutes_, block_storage_) - .value(); - tracker_->subscribeToBlockchainEvents(sub_engine_, block_tree_); + tracker_ = kagome::runtime::RuntimeUpgradeTrackerImpl::create( + storage_, known_code_substitutes_, block_tree_) + .value(); + tracker_->subscribeToBlockchainEvents(sub_engine_); auto block1 = makeBlockInfo(5203203); auto block1_header = makeBlockHeader(5203203); known_code_substitutes_.reset( new kagome::primitives::CodeSubstituteBlockIds({block1.number})); - EXPECT_CALL(*header_repo_, getBlockHeader(block1.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block1.hash)) .WillOnce(testing::Return(block1_header)); EXPECT_OUTCOME_TRUE_1(tracker_->getLastCodeUpdateState(block1)); // @see https://polkadot.subscan.io/event?module=system&event=codeupdated auto block2 = makeBlockInfo(5661442); auto block2_header = makeBlockHeader(5661442); - EXPECT_CALL(*header_repo_, getBlockHeader(block2.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block2.hash)) .WillRepeatedly(testing::Return(block2_header)); sub_engine_->notify( kagome::primitives::events::ChainEventType::kNewRuntime, @@ -286,7 +275,7 @@ TEST_F(RuntimeUpgradeTrackerTest, UpgradeAfterCodeSubstitute) { } TEST_F(RuntimeUpgradeTrackerTest, 
OrphanBlock) { - tracker_->subscribeToBlockchainEvents(sub_engine_, block_tree_); + tracker_->subscribeToBlockchainEvents(sub_engine_); // suppose we have two forks // / - 33f2 // 32 - 33f1 - 34f1 @@ -297,7 +286,7 @@ TEST_F(RuntimeUpgradeTrackerTest, OrphanBlock) { // and then we receive 34f2 with a runtime upgrade auto block_34f2 = makeBlockInfo(34, 2); auto block_34f2_header = makeBlockHeader(34, 2); - EXPECT_CALL(*header_repo_, getBlockHeader(block_34f2.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block_34f2.hash)) .WillRepeatedly(testing::Return(block_34f2_header)); sub_engine_->notify( kagome::primitives::events::ChainEventType::kNewRuntime, @@ -306,7 +295,7 @@ TEST_F(RuntimeUpgradeTrackerTest, OrphanBlock) { // and then we receive 35f1 and query the latest runtime for it auto block_35f1 = makeBlockInfo(35, 1); auto block_35f1_header = makeBlockHeader(35, 1); - EXPECT_CALL(*header_repo_, getBlockHeader(block_35f1.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block_35f1.hash)) .WillRepeatedly(testing::Return(block_35f1_header)); EXPECT_CALL(*block_tree_, hasDirectChain(block_34f2.hash, block_35f1.hash)) @@ -320,7 +309,7 @@ TEST_F(RuntimeUpgradeTrackerTest, OrphanBlock) { auto block_33f1 = makeBlockInfo(33, 1); auto block_33f1_header = makeBlockHeader(33, 1); - EXPECT_CALL(*header_repo_, getBlockHeader(block_33f1.hash)) + EXPECT_CALL(*block_tree_, getBlockHeader(block_33f1.hash)) .WillRepeatedly(testing::Return(block_33f1_header)); sub_engine_->notify( kagome::primitives::events::ChainEventType::kNewRuntime, diff --git a/test/core/runtime/trie_storage_provider_test.cpp b/test/core/runtime/trie_storage_provider_test.cpp index bbd8618d1e..20429dfa56 100644 --- a/test/core/runtime/trie_storage_provider_test.cpp +++ b/test/core/runtime/trie_storage_provider_test.cpp @@ -11,6 +11,7 @@ #include "common/buffer.hpp" #include "mock/core/storage/trie_pruner/trie_pruner_mock.hpp" #include "runtime/common/runtime_execution_error.hpp" +#include 
"storage/in_memory/in_memory_spaced_storage.hpp" #include "storage/trie/impl/trie_storage_backend_impl.hpp" #include "storage/trie/impl/trie_storage_impl.hpp" #include "storage/trie/polkadot_trie/polkadot_trie_factory_impl.hpp" @@ -19,7 +20,6 @@ #include "testutil/literals.hpp" #include "testutil/outcome.hpp" #include "testutil/prepare_loggers.hpp" -#include "testutil/storage/in_memory/in_memory_spaced_storage.hpp" using kagome::common::Buffer; using kagome::runtime::RuntimeExecutionError; diff --git a/test/core/runtime/wavm/CMakeLists.txt b/test/core/runtime/wavm/CMakeLists.txt index 8b791f22bc..88ba02c971 100644 --- a/test/core/runtime/wavm/CMakeLists.txt +++ b/test/core/runtime/wavm/CMakeLists.txt @@ -42,5 +42,4 @@ target_link_libraries(core_integration_test storage filesystem logger_for_tests - in_memory_storage ) diff --git a/test/core/runtime/wavm/core_integration_test.cpp b/test/core/runtime/wavm/core_integration_test.cpp index 3466b7afad..62ee19854e 100644 --- a/test/core/runtime/wavm/core_integration_test.cpp +++ b/test/core/runtime/wavm/core_integration_test.cpp @@ -44,7 +44,7 @@ class CoreTest : public ::testing::Test, public WavmRuntimeTest { SetUpImpl(); core_ = - std::make_shared(executor_, nullptr, header_repo_, nullptr); + std::make_shared(executor_, nullptr, block_tree_, nullptr); } protected: diff --git a/test/core/storage/changes_trie/CMakeLists.txt b/test/core/storage/changes_trie/CMakeLists.txt index 713b42cd26..3d4fb48fac 100644 --- a/test/core/storage/changes_trie/CMakeLists.txt +++ b/test/core/storage/changes_trie/CMakeLists.txt @@ -10,6 +10,5 @@ addtest(changes_tracker_test target_link_libraries(changes_tracker_test storage logger_for_tests - in_memory_storage ) diff --git a/test/core/storage/changes_trie/changes_tracker_test.cpp b/test/core/storage/changes_trie/changes_tracker_test.cpp index 269c2aa4c2..b7165ed51d 100644 --- a/test/core/storage/changes_trie/changes_tracker_test.cpp +++ 
b/test/core/storage/changes_trie/changes_tracker_test.cpp @@ -12,6 +12,8 @@ #include "mock/core/storage/trie_pruner/trie_pruner_mock.hpp" #include "primitives/event_types.hpp" #include "scale/scale.hpp" +#include "storage/in_memory/in_memory_spaced_storage.hpp" +#include "storage/in_memory/in_memory_storage.hpp" #include "storage/trie/impl/persistent_trie_batch_impl.hpp" #include "storage/trie/impl/trie_storage_backend_impl.hpp" #include "storage/trie/polkadot_trie/polkadot_trie_factory_impl.hpp" @@ -20,8 +22,6 @@ #include "testutil/literals.hpp" #include "testutil/outcome.hpp" #include "testutil/prepare_loggers.hpp" -#include "testutil/storage/in_memory/in_memory_spaced_storage.hpp" -#include "testutil/storage/in_memory/in_memory_storage.hpp" using kagome::api::Session; using kagome::blockchain::BlockHeaderRepositoryMock; diff --git a/test/core/storage/trie/polkadot_trie/polkadot_trie_test.cpp b/test/core/storage/trie/polkadot_trie/polkadot_trie_test.cpp index f2ff8f08bd..022d5ff90b 100644 --- a/test/core/storage/trie/polkadot_trie/polkadot_trie_test.cpp +++ b/test/core/storage/trie/polkadot_trie/polkadot_trie_test.cpp @@ -6,12 +6,12 @@ #include +#include "storage/in_memory/in_memory_storage.hpp" #include "storage/trie/polkadot_trie/polkadot_trie_impl.hpp" #include "storage/trie/polkadot_trie/trie_error.hpp" #include "testutil/literals.hpp" #include "testutil/outcome.hpp" #include "testutil/prepare_loggers.hpp" -#include "testutil/storage/in_memory/in_memory_storage.hpp" #include "testutil/storage/polkadot_trie_printer.hpp" using kagome::common::Buffer; diff --git a/test/core/storage/trie/polkadot_trie_cursor_dummy.hpp b/test/core/storage/trie/polkadot_trie_cursor_dummy.hpp index a9c38398db..6f4eb97246 100644 --- a/test/core/storage/trie/polkadot_trie_cursor_dummy.hpp +++ b/test/core/storage/trie/polkadot_trie_cursor_dummy.hpp @@ -6,10 +6,7 @@ #pragma once -#include "common/blob.hpp" -#include "crypto/blake2/blake2b.h" #include 
"storage/trie/polkadot_trie/polkadot_trie_cursor.hpp" -#include "storage/trie/polkadot_trie/trie_node.hpp" namespace kagome::storage::trie { @@ -17,7 +14,7 @@ namespace kagome::storage::trie { class PolkadotTrieCursorDummy : public PolkadotTrieCursor { private: std::map> key_val_; - decltype(key_val_)::iterator current_; + decltype(key_val_.begin()) current_; public: explicit PolkadotTrieCursorDummy( @@ -72,9 +69,5 @@ namespace kagome::storage::trie { std::optional value() const override { return BufferView{current_->second}; } - - std::optional valueHash() const override { - return ValueHash{crypto::blake2b<32>(current_->second), false}; - } }; } // namespace kagome::storage::trie diff --git a/test/core/storage/trie/trie_storage/CMakeLists.txt b/test/core/storage/trie/trie_storage/CMakeLists.txt index 605afc4e5b..a5e0b265d5 100644 --- a/test/core/storage/trie/trie_storage/CMakeLists.txt +++ b/test/core/storage/trie/trie_storage/CMakeLists.txt @@ -18,5 +18,12 @@ target_link_libraries(polkadot_trie_storage_test Boost::boost base_rocksdb_test logger_for_tests - in_memory_storage + ) + +addtest(trie_storage_backend_test + trie_storage_backend_test.cpp + ) +target_link_libraries(trie_storage_backend_test + storage + blob ) diff --git a/test/core/storage/trie/trie_storage/polkadot_codec_node_decoding_test.cpp b/test/core/storage/trie/trie_storage/polkadot_codec_node_decoding_test.cpp index 17e8366c60..ce286d72c7 100644 --- a/test/core/storage/trie/trie_storage/polkadot_codec_node_decoding_test.cpp +++ b/test/core/storage/trie/trie_storage/polkadot_codec_node_decoding_test.cpp @@ -7,7 +7,6 @@ #include #include - #include "storage/trie/polkadot_trie/trie_node.hpp" #include "storage/trie/serialization/buffer_stream.hpp" #include "storage/trie/serialization/polkadot_codec.hpp" @@ -31,7 +30,7 @@ TEST_P(NodeDecodingTest, GetHeader) { EXPECT_OUTCOME_TRUE( encoded, codec->encodeNode(*node, storage::trie::StateVersion::V0, {})); EXPECT_OUTCOME_TRUE(decoded, 
codec->decodeNode(encoded)); - auto decoded_node = decoded; + auto decoded_node = std::dynamic_pointer_cast(decoded); EXPECT_EQ(decoded_node->getKeyNibbles(), node->getKeyNibbles()); EXPECT_EQ(decoded_node->getValue(), node->getValue()); } diff --git a/test/core/storage/trie/trie_storage/trie_batch_test.cpp b/test/core/storage/trie/trie_storage/trie_batch_test.cpp index 637f3eea8b..bd79ae4723 100644 --- a/test/core/storage/trie/trie_storage/trie_batch_test.cpp +++ b/test/core/storage/trie/trie_storage/trie_batch_test.cpp @@ -6,14 +6,11 @@ #include #include -#include -#include "gmock/gmock.h" #include "mock/core/storage/spaced_storage_mock.hpp" -#include "mock/core/storage/trie/trie_batches_mock.hpp" #include "mock/core/storage/trie_pruner/trie_pruner_mock.hpp" -#include "mock/core/storage/write_batch_mock.hpp" #include "storage/changes_trie/impl/storage_changes_tracker_impl.hpp" +#include "storage/in_memory/in_memory_storage.hpp" #include "storage/trie/impl/topper_trie_batch_impl.hpp" #include "storage/trie/impl/trie_storage_backend_impl.hpp" #include "storage/trie/impl/trie_storage_impl.hpp" @@ -24,8 +21,6 @@ #include "testutil/literals.hpp" #include "testutil/outcome.hpp" #include "testutil/storage/base_rocksdb_test.hpp" -#include "testutil/storage/in_memory/in_memory_batch.hpp" -#include "testutil/storage/in_memory/in_memory_storage.hpp" using namespace kagome::storage::trie; using kagome::api::Session; @@ -209,10 +204,6 @@ TEST_F(TrieBatchTest, ConsistentOnFailure) { auto spaced_db = std::make_shared(); ON_CALL(*spaced_db, getSpace(Space::kTrieNode)).WillByDefault(Return(db)); - ON_CALL(*spaced_db, getSpace(Space::kTrieValue)).WillByDefault(Return(db)); - ON_CALL(*spaced_db, createBatch()).WillByDefault(Invoke([&spaced_db]() { - return std::make_unique(*spaced_db); - })); auto factory = std::make_shared(); auto codec = std::make_shared(); diff --git a/test/core/storage/trie/trie_storage/trie_storage_backend_test.cpp 
b/test/core/storage/trie/trie_storage/trie_storage_backend_test.cpp new file mode 100644 index 0000000000..9c9598cd4a --- /dev/null +++ b/test/core/storage/trie/trie_storage/trie_storage_backend_test.cpp @@ -0,0 +1,93 @@ +/** + * Copyright Quadrivium LLC + * All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include + +#include "mock/core/storage/generic_storage_mock.hpp" +#include "mock/core/storage/spaced_storage_mock.hpp" +#include "mock/core/storage/write_batch_mock.hpp" +#include "storage/trie/impl/trie_storage_backend_impl.hpp" +#include "testutil/literals.hpp" +#include "testutil/outcome.hpp" + +using kagome::common::Buffer; +using kagome::common::BufferView; +using kagome::storage::BufferStorageMock; +using kagome::storage::SpacedStorageMock; +using kagome::storage::face::WriteBatchMock; +using kagome::storage::trie::TrieStorageBackendImpl; +using testing::Invoke; +using testing::Return; + +class TrieDbBackendTest : public testing::Test { + public: + void SetUp() override { + ON_CALL(*spaced_storage, getSpace(kagome::storage::Space::kTrieNode)) + .WillByDefault(Return(storage)); + backend = std::make_unique(spaced_storage); + } + + std::shared_ptr storage = + std::make_shared(); + std::shared_ptr spaced_storage = + std::make_shared(); + std::unique_ptr backend; +}; + +/** + * @given trie backend + * @when put a value to it + * @then it puts a prefixed value to the storage + */ +TEST_F(TrieDbBackendTest, Put) { + auto key = "abc"_buf; + ((*storage).gmock_put(BufferView{key}, "123"_buf))( + ::testing::internal::GetWithoutMatchers(), nullptr) + .InternalExpectedAt( + "_file_name_", 40, "*storage", "put(prefixed, \"123\"_buf)") + .WillOnce(Return(outcome::success())); + EXPECT_OUTCOME_TRUE_1(backend->put("abc"_buf, "123"_buf)); +} + +/** + * @given trie backend + * @when get a value from it + * @then it takes a prefixed value from the storage + */ +TEST_F(TrieDbBackendTest, Get) { + auto key = "abc"_buf; + EXPECT_CALL(*storage, 
getMock(BufferView{key})).WillOnce(Return("123"_buf)); + EXPECT_OUTCOME_TRUE_1(backend->get("abc"_buf)); +} + +/** + * @given trie backend batch + * @when perform operations on it + * @then it delegates them to the underlying storage batch with added prefixes + */ +TEST_F(TrieDbBackendTest, Batch) { + auto batch_mock = std::make_unique>(); + auto buf_abc = "abc"_buf; + EXPECT_CALL(*batch_mock, put(buf_abc.view(), "123"_buf)) + .WillOnce(Return(outcome::success())); + auto buf_def = "def"_buf; + EXPECT_CALL(*batch_mock, put(buf_def.view(), "123"_buf)) + .WillOnce(Return(outcome::success())); + EXPECT_CALL(*batch_mock, remove(buf_abc.view())) + .WillOnce(Return(outcome::success())); + EXPECT_CALL(*batch_mock, commit()).WillOnce(Return(outcome::success())); + + EXPECT_CALL(*storage, batch()) + .WillOnce(Return(testing::ByMove(std::move(batch_mock)))); + + auto batch = backend->batch(); + EXPECT_OUTCOME_TRUE_1(batch->put("abc"_buf, "123"_buf)); + EXPECT_OUTCOME_TRUE_1(batch->put("def"_buf, "123"_buf)); + EXPECT_OUTCOME_TRUE_1(batch->remove("abc"_buf)); + EXPECT_OUTCOME_TRUE_1(batch->commit()); +} diff --git a/test/core/storage/trie_pruner/trie_pruner_test.cpp b/test/core/storage/trie_pruner/trie_pruner_test.cpp index 885e3956bf..22ee018a46 100644 --- a/test/core/storage/trie_pruner/trie_pruner_test.cpp +++ b/test/core/storage/trie_pruner/trie_pruner_test.cpp @@ -20,10 +20,7 @@ #include "mock/core/storage/trie/serialization/trie_serializer_mock.hpp" #include "mock/core/storage/trie/trie_storage_backend_mock.hpp" #include "mock/core/storage/write_batch_mock.hpp" -#include "storage/buffer_map_types.hpp" #include "storage/database_error.hpp" -#include "storage/spaces.hpp" -#include "storage/trie/impl/trie_storage_backend_impl.hpp" #include "storage/trie/polkadot_trie/polkadot_trie_factory_impl.hpp" #include "storage/trie/polkadot_trie/polkadot_trie_impl.hpp" #include "storage/trie/serialization/polkadot_codec.hpp" @@ -167,7 +164,8 @@ class TriePrunerTest : public 
testing::Test { ON_CALL(*config_mock, statePruningDepth()).WillByDefault(Return(16)); ON_CALL(*config_mock, enableThoroughPruning()).WillByDefault(Return(true)); - trie_node_storage_mock.reset(new testing::NiceMock()); + trie_node_storage_mock.reset( + new testing::NiceMock()); persistent_storage_mock.reset( new testing::NiceMock); serializer_mock.reset(new testing::NiceMock); @@ -186,13 +184,10 @@ class TriePrunerTest : public testing::Test { ON_CALL(*persistent_storage_mock, getSpace(kDefault)) .WillByDefault(Invoke([this](auto) { return pruner_space; })); - ON_CALL(*persistent_storage_mock, getSpace(kTrieNode)) - .WillByDefault(Invoke([this](auto) { return trie_node_storage_mock; })); - ON_CALL(*persistent_storage_mock, getSpace(kTrieValue)) - .WillByDefault(Invoke([this](auto) { return trie_node_storage_mock; })); pruner.reset(new TriePrunerImpl( std::make_shared(), + trie_node_storage_mock, serializer_mock, codec_mock, persistent_storage_mock, @@ -218,6 +213,7 @@ class TriePrunerTest : public testing::Test { pruner.reset(new TriePrunerImpl( std::make_shared(), + trie_node_storage_mock, serializer_mock, codec_mock, persistent_storage_mock, @@ -262,7 +258,7 @@ class TriePrunerTest : public testing::Test { std::unique_ptr pruner; std::shared_ptr serializer_mock; - std::shared_ptr trie_node_storage_mock; + std::shared_ptr trie_node_storage_mock; std::shared_ptr> persistent_storage_mock; std::shared_ptr codec_mock; std::shared_ptr hasher; @@ -352,15 +348,12 @@ TEST_F(TriePrunerTest, BasicScenario) { {{"_0"_hash256, makeTransparentNode({NODE, "_0"_hash256, {}})}, {"_5"_hash256, makeTransparentNode({NODE, "_5"_hash256, {}})}}})); - EXPECT_CALL(*persistent_storage_mock, createBatch()) - .WillRepeatedly(Invoke([]() { - auto batch = - std::make_unique>(); - EXPECT_CALL(*batch, remove(_, _)) - .WillRepeatedly(Return(outcome::success())); - EXPECT_CALL(*batch, commit()).WillOnce(Return(outcome::success())); - return batch; - })); + EXPECT_CALL(*trie_node_storage_mock, 
batch()).WillRepeatedly(Invoke([]() { + auto batch = std::make_unique>(); + EXPECT_CALL(*batch, remove(_)).WillRepeatedly(Return(outcome::success())); + EXPECT_CALL(*batch, commit()).WillOnce(Return(outcome::success())); + return batch; + })); EXPECT_CALL(*serializer_mock, retrieveTrie("root1"_hash256, _)) .WillOnce(testing::Return(trie)); BlockHeader header1{.number = 1, .state_root = "root1"_hash256}; @@ -476,7 +469,7 @@ TEST_F(TriePrunerTest, RandomTree) { std::map node_storage; std::set inserted_keys; - EXPECT_CALL(*trie_node_storage_mock, getMock(_)) + EXPECT_CALL(*trie_node_storage_mock, get(_)) .WillRepeatedly( Invoke([&node_storage](auto &k) -> outcome::result { auto it = node_storage.find(k); @@ -487,9 +480,7 @@ TEST_F(TriePrunerTest, RandomTree) { })); trie::TrieSerializerImpl serializer{ - trie_factory, - codec, - std::make_shared(persistent_storage_mock)}; + trie_factory, codec, trie_node_storage_mock}; std::vector> kv; std::mt19937 rand; rand.seed(42); @@ -509,27 +500,21 @@ TEST_F(TriePrunerTest, RandomTree) { return serializer.retrieveNode(node, nullptr); })); - EXPECT_CALL(*persistent_storage_mock, createBatch()) - .WillRepeatedly(Invoke([&node_storage]() { - auto batch_mock = - std::make_unique>(); - EXPECT_CALL(*batch_mock, put(Space::kTrieNode, _, _)) - .WillRepeatedly(Invoke([&node_storage](Space, auto &k, auto &v) { - node_storage[k] = v; - return outcome::success(); - })); - EXPECT_CALL(*batch_mock, remove(Space::kTrieNode, _)) - .WillRepeatedly(Invoke([&node_storage](Space, auto &k) { - node_storage.erase(k); - return outcome::success(); - })); - - EXPECT_CALL(*batch_mock, commit()) - .WillRepeatedly(Return(outcome::success())); - return batch_mock; - })); - for (unsigned i = 0; i < STATES_NUM; i++) { + EXPECT_CALL(*trie_node_storage_mock, batch()) + .WillOnce(Invoke([&node_storage]() { + auto batch_mock = + std::make_unique>(); + EXPECT_CALL(*batch_mock, put(_, _)) + .WillRepeatedly(Invoke([&node_storage](auto &k, auto &v) { + node_storage[k] 
= v; + return outcome::success(); + })); + EXPECT_CALL(*batch_mock, commit()) + .WillRepeatedly(Return(outcome::success())); + return batch_mock; + })); + for (unsigned j = 0; j < INSERT_PER_STATE; j++) { auto k = randomBuffer(rand); inserted_keys.insert(k); @@ -566,6 +551,19 @@ TEST_F(TriePrunerTest, RandomTree) { roots.push_back(root); if (i >= 16) { + EXPECT_CALL(*trie_node_storage_mock, batch()) + .WillOnce(Invoke([&node_storage]() { + auto batch = + std::make_unique>(); + EXPECT_CALL(*batch, remove(_)) + .WillRepeatedly(Invoke([&node_storage](auto &k) { + node_storage.erase(k); + return outcome::success(); + })); + EXPECT_CALL(*batch, commit()).WillOnce(Return(outcome::success())); + return batch; + })); + const auto &root = roots[i - 16]; BlockHeader header{.number = i - 16, .state_root = root}; @@ -574,12 +572,11 @@ TEST_F(TriePrunerTest, RandomTree) { } } for (unsigned i = STATES_NUM - 16; i < STATES_NUM; i++) { - EXPECT_CALL(*persistent_storage_mock, createBatch()) + EXPECT_CALL(*trie_node_storage_mock, batch()) .WillOnce(Invoke([&node_storage]() { - auto batch = - std::make_unique>(); - EXPECT_CALL(*batch, remove(Space::kTrieNode, _)) - .WillRepeatedly(Invoke([&node_storage](Space, auto &k) { + auto batch = std::make_unique>(); + EXPECT_CALL(*batch, remove(_)) + .WillRepeatedly(Invoke([&node_storage](auto &k) { node_storage.erase(k); return outcome::success(); })); @@ -709,7 +706,7 @@ TEST_F(TriePrunerTest, FastSyncScenario) { auto block_tree = std::make_shared>(); - ON_CALL(*trie_node_storage_mock, getMock(_)) + ON_CALL(*trie_node_storage_mock, get(_)) .WillByDefault(Invoke( [&](auto &key) -> outcome::result { if (node_storage.count(key) == 0) { @@ -718,19 +715,18 @@ TEST_F(TriePrunerTest, FastSyncScenario) { return kagome::common::BufferOrView{node_storage.at(key).view()}; })); - ON_CALL(*persistent_storage_mock, createBatch()).WillByDefault(Invoke([&]() { + ON_CALL(*trie_node_storage_mock, batch()).WillByDefault(Invoke([&]() { auto batch = 
std::make_unique< - testing::NiceMock>>(); - ON_CALL(*batch, put(Space::kTrieNode, _, _)) - .WillByDefault(Invoke([&](Space, auto &key, auto &value) { + testing::NiceMock>>(); + ON_CALL(*batch, put(_, _)) + .WillByDefault(Invoke([&](auto &key, auto &value) { node_storage[key] = value; return outcome::success(); })); - ON_CALL(*batch, remove(Space::kTrieNode, _)) - .WillByDefault(Invoke([&](Space, auto &key) { - node_storage.erase(key); - return outcome::success(); - })); + ON_CALL(*batch, remove(_)).WillByDefault(Invoke([&](auto &key) { + node_storage.erase(key); + return outcome::success(); + })); ON_CALL(*batch, commit()).WillByDefault(Return(outcome::success())); return batch; })); @@ -748,9 +744,7 @@ TEST_F(TriePrunerTest, FastSyncScenario) { .value()); trie::TrieSerializerImpl serializer{ - trie_factory, - codec, - std::make_shared(persistent_storage_mock)}; + trie_factory, codec, trie_node_storage_mock}; ON_CALL(*serializer_mock, retrieveTrie(genesis_state_root, _)) .WillByDefault(Return(genesis_trie)); diff --git a/test/external-project-test/link_libraries.cmake b/test/external-project-test/link_libraries.cmake index d821953d7c..310d7f0591 100644 --- a/test/external-project-test/link_libraries.cmake +++ b/test/external-project-test/link_libraries.cmake @@ -34,6 +34,7 @@ function(external_project_link_libraries target prefix) blockchain runtime_upgrade_tracker runtime_properties_cache + application_injector ) list(TRANSFORM targets PREPEND "${prefix}") target_link_libraries(${target} ${targets}) diff --git a/test/external-project-test/src/main.cpp b/test/external-project-test/src/main.cpp index ff5c8c3206..b1682e8c56 100644 --- a/test/external-project-test/src/main.cpp +++ b/test/external-project-test/src/main.cpp @@ -4,44 +4,12 @@ * SPDX-License-Identifier: Apache-2.0 */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include 
-#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include #include -#include #include +#include +#include int main() { libp2p::common::FinalAction flush_std_streams_at_exit([] { @@ -61,182 +29,8 @@ int main() { } kagome::log::setLoggingSystem(logging_system); - kagome::application::AppConfigurationImpl app_config; - - rocksdb::Options db_options{}; - db_options.create_if_missing = true; - std::shared_ptr database = - kagome::storage::RocksDb::create("/tmp/kagome_tmp_db", db_options) - .value(); - auto hasher = std::make_shared(); - auto header_repo = - std::make_shared(database, - hasher); - - using std::string_literals::operator""s; - - auto chain_spec = kagome::application::ChainSpecImpl::loadFrom( - kagome::filesystem::path(__FILE__).parent_path() - / "../../../examples/polkadot/polkadot.json"s) - .value(); - - auto code_substitutes = chain_spec->codeSubstitutes(); - - auto config = std::make_shared(); - - auto trie_factory = - std::make_shared(); - auto codec = std::make_shared(); - auto node_storage_backend = - std::make_shared(database); - auto serializer = std::make_shared( - trie_factory, codec, node_storage_backend); - - auto app_state_manager = - std::make_shared(); - - auto state_pruner = - std::make_shared( - app_state_manager, serializer, codec, database, hasher, config); - - std::shared_ptr trie_storage = - kagome::storage::trie::TrieStorageImpl::createEmpty( - trie_factory, codec, serializer, state_pruner) - .value(); - - auto batch = - trie_storage - ->getPersistentBatchAt(serializer->getEmptyRootHash(), std::nullopt) - .value(); - auto root_hash = - batch->commit(kagome::storage::trie::StateVersion::V0).value(); - auto block_storage = - kagome::blockchain::BlockStorageImpl::create(root_hash, database, hasher) - .value(); - std::shared_ptr - runtime_upgrade_tracker = - std::move(kagome::runtime::RuntimeUpgradeTrackerImpl::create( 
- header_repo, database, code_substitutes, block_storage) - .value()); - - auto storage_batch = - trie_storage - ->getPersistentBatchAt(serializer->getEmptyRootHash(), std::nullopt) - .value(); - for (auto &kv : chain_spec->getGenesisTopSection()) { - storage_batch->put(kv.first, kv.second.view()).value(); - } - storage_batch->commit(kagome::storage::trie::StateVersion::V0).value(); - - auto code_provider = - std::make_shared( - trie_storage, runtime_upgrade_tracker, code_substitutes, chain_spec); - - auto generator = - std::make_shared(); - auto ecdsa_provider = - std::make_shared(hasher); - auto ed25519_provider = - std::make_shared(hasher); - auto sr25519_provider = - std::make_shared(); - auto bandersnatch_provider = - std::make_shared(hasher); - auto secp256k1_provider = - std::make_shared(); - auto pbkdf2_provider = std::make_shared(); - auto bip39_provider = std::make_shared( - pbkdf2_provider, hasher); - - auto elliptic_curves = std::make_shared(); - - auto key_store_dir = "/tmp/kagome_tmp_key_storage"; - std::shared_ptr key_fs = - kagome::crypto::KeyFileStorage::createAt(key_store_dir).value(); - auto csprng = - std::make_shared(); - - auto sr_suite = std::make_unique< - kagome::crypto::KeySuiteStoreImpl>( - sr25519_provider, bip39_provider, csprng, key_fs); - auto ed_suite = std::make_unique< - kagome::crypto::KeySuiteStoreImpl>( - ed25519_provider, bip39_provider, csprng, key_fs); - auto ecdsa_suite = std::make_unique< - kagome::crypto::KeySuiteStoreImpl>( - ecdsa_provider, bip39_provider, csprng, key_fs); - auto bandersnatch_suite = std::make_unique< - kagome::crypto::KeySuiteStoreImpl>( - bandersnatch_provider, bip39_provider, csprng, key_fs); - - auto crypto_store = std::make_shared( - std::move(sr_suite), - std::move(ed_suite), - std::move(ecdsa_suite), - std::move(bandersnatch_suite), - ed25519_provider, - app_state_manager, - kagome::crypto::KeyStore::Config{key_store_dir}); - - auto offchain_persistent_storage = - std::make_shared( - database); - 
- auto offchain_worker_pool = - std::make_shared(); - - auto host_api_factory = - std::make_shared( - kagome::host_api::OffchainExtensionConfig{}, - ecdsa_provider, - ed25519_provider, - sr25519_provider, - bandersnatch_provider, - secp256k1_provider, - elliptic_curves, - hasher, - crypto_store, - offchain_persistent_storage, - offchain_worker_pool); - - auto cache = std::make_shared(); - - std::shared_ptr - runtime_instances_pool; - auto injector = boost::di::make_injector( - boost::di::bind().to( - [&](const auto &) { return runtime_instances_pool; })); - auto core_factory = std::make_shared( - hasher, - injector - .create>()); - - auto instance_env_factory = - std::make_shared( - trie_storage, serializer, core_factory, host_api_factory); - - auto module_factory = - std::make_shared( - instance_env_factory, trie_storage, hasher); - - runtime_instances_pool = - std::make_shared( - app_config, - module_factory, - std::make_shared()); - auto module_repo = std::make_shared( - runtime_instances_pool, - hasher, - header_repo, - runtime_upgrade_tracker, - trie_storage, - module_factory, - code_provider); - - [[maybe_unused]] auto ctx_factory = - std::make_shared(module_repo, - header_repo); - [[maybe_unused]] auto executor = - kagome::runtime::Executor(ctx_factory, cache); + auto injector = std::make_shared( + std::make_shared()); + [[maybe_unused]] auto executor = injector->injectExecutor(); return 0; } diff --git a/test/mock/core/application/app_configuration_mock.hpp b/test/mock/core/application/app_configuration_mock.hpp index 3fa0d2761e..fd69d234db 100644 --- a/test/mock/core/application/app_configuration_mock.hpp +++ b/test/mock/core/application/app_configuration_mock.hpp @@ -162,6 +162,8 @@ namespace kagome::application { MOCK_METHOD(uint32_t, luckyPeers, (), (const, override)); + MOCK_METHOD(uint32_t, maxPeers, (), (const, override)); + MOCK_METHOD(bool, isTelemetryEnabled, (), (const, override)); MOCK_METHOD(std::optional, diff --git 
a/test/mock/core/blockchain/block_header_repository_mock.hpp b/test/mock/core/blockchain/block_header_repository_mock.hpp index cf8c059aa4..4d50de9776 100644 --- a/test/mock/core/blockchain/block_header_repository_mock.hpp +++ b/test/mock/core/blockchain/block_header_repository_mock.hpp @@ -29,8 +29,8 @@ namespace kagome::blockchain { (const primitives::BlockHash &), (const, override)); - MOCK_METHOD(outcome::result, - getBlockStatus, + MOCK_METHOD(outcome::result>, + tryGetBlockHeader, (const primitives::BlockHash &), (const, override)); diff --git a/test/mock/core/blockchain/block_storage_mock.hpp b/test/mock/core/blockchain/block_storage_mock.hpp index 333216dce1..0d25e1d537 100644 --- a/test/mock/core/blockchain/block_storage_mock.hpp +++ b/test/mock/core/blockchain/block_storage_mock.hpp @@ -57,11 +57,16 @@ namespace kagome::blockchain { (const primitives::BlockHeader &), (override)); - MOCK_METHOD(outcome::result>, + MOCK_METHOD(outcome::result, getBlockHeader, (const primitives::BlockHash &), (const, override)); + MOCK_METHOD(outcome::result>, + tryGetBlockHeader, + (const primitives::BlockHash &), + (const, override)); + MOCK_METHOD(outcome::result, putBlockBody, (const primitives::BlockHash &, const primitives::BlockBody &), diff --git a/test/mock/core/blockchain/block_tree_mock.hpp b/test/mock/core/blockchain/block_tree_mock.hpp index 726cbd7a02..d55f2b8207 100644 --- a/test/mock/core/blockchain/block_tree_mock.hpp +++ b/test/mock/core/blockchain/block_tree_mock.hpp @@ -34,6 +34,11 @@ namespace kagome::blockchain { (const primitives::BlockHash &), (const, override)); + MOCK_METHOD(outcome::result>, + tryGetBlockHeader, + (const primitives::BlockHash &), + (const, override)); + MOCK_METHOD(outcome::result, getBlockJustification, (const primitives::BlockHash &), @@ -128,6 +133,16 @@ namespace kagome::blockchain { (const primitives::BlockHash &), (const, override)); + MOCK_METHOD(outcome::result, + getHashByNumber, + (primitives::BlockNumber), + (const, 
override)); + + MOCK_METHOD(outcome::result, + getNumberByHash, + (const primitives::BlockHash &), + (const, override)); + MOCK_METHOD(primitives::BlockInfo, getLastFinalized, (), (const, override)); MOCK_METHOD(void, warp, (const primitives::BlockInfo &), (override)); diff --git a/test/mock/core/crypto/key_store_mock.hpp b/test/mock/core/crypto/key_store_mock.hpp index 9b7063888a..891c2234dc 100644 --- a/test/mock/core/crypto/key_store_mock.hpp +++ b/test/mock/core/crypto/key_store_mock.hpp @@ -58,7 +58,12 @@ namespace kagome::crypto { std::make_unique>(), std::make_unique>(), std::make_shared(), - std::make_shared(), + [] { + auto app_state_manager = + std::make_shared(); + EXPECT_CALL(*app_state_manager, atPrepare(testing::_)); + return app_state_manager; + }(), KeyStore::Config{{}}}, sr25519_{dynamic_cast &>( KeyStore ::sr25519())}, diff --git a/test/mock/core/network/protocols/parachain.hpp b/test/mock/core/network/protocols/parachain.hpp new file mode 100644 index 0000000000..cebb45042c --- /dev/null +++ b/test/mock/core/network/protocols/parachain.hpp @@ -0,0 +1,18 @@ +/** + * Copyright Quadrivium LLC + * All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include "network/impl/protocols/parachain.hpp" + +#include + +namespace kagome::network { + class ValidationProtocolReserveMock : public ValidationProtocolReserve { + public: + MOCK_METHOD(void, reserve, (const PeerId &, bool), (override)); + }; +} // namespace kagome::network diff --git a/test/mock/core/storage/generic_storage_mock.hpp b/test/mock/core/storage/generic_storage_mock.hpp index 96462d092b..55fd3b1ab8 100644 --- a/test/mock/core/storage/generic_storage_mock.hpp +++ b/test/mock/core/storage/generic_storage_mock.hpp @@ -13,15 +13,12 @@ namespace kagome::storage::face { template - struct GenericStorageMock : public BatchableStorage { + struct GenericStorageMock : public GenericStorage { MOCK_METHOD0_T(batch, std::unique_ptr>()); MOCK_METHOD0_T(cursor, 
std::unique_ptr>()); - MOCK_METHOD(outcome::result>, - getMock, - (const View &), - (const)); + MOCK_METHOD(outcome::result, getMock, (const View &), (const)); MOCK_METHOD(outcome::result>, tryGetMock, diff --git a/test/mock/core/storage/spaced_storage_mock.hpp b/test/mock/core/storage/spaced_storage_mock.hpp index 4355ffaccf..02ec4f789c 100644 --- a/test/mock/core/storage/spaced_storage_mock.hpp +++ b/test/mock/core/storage/spaced_storage_mock.hpp @@ -14,15 +14,7 @@ namespace kagome::storage { class SpacedStorageMock : public SpacedStorage { public: - MOCK_METHOD(std::shared_ptr, - getSpace, - (Space), - (override)); - - MOCK_METHOD(std::unique_ptr, - createBatch, - (), - (override)); + MOCK_METHOD(std::shared_ptr, getSpace, (Space), (override)); }; } // namespace kagome::storage diff --git a/test/mock/core/storage/trie/polkadot_trie_cursor_mock.h b/test/mock/core/storage/trie/polkadot_trie_cursor_mock.h index f0f7fe3c57..38619a7645 100644 --- a/test/mock/core/storage/trie/polkadot_trie_cursor_mock.h +++ b/test/mock/core/storage/trie/polkadot_trie_cursor_mock.h @@ -7,7 +7,6 @@ #ifndef KAGOME_POLKADOT_TRIE_CURSOR_MOCK_H #define KAGOME_POLKADOT_TRIE_CURSOR_MOCK_H -#include "common/blob.hpp" #include "storage/trie/polkadot_trie/polkadot_trie_cursor_impl.hpp" namespace kagome::storage::trie { @@ -39,10 +38,6 @@ namespace kagome::storage::trie { MOCK_METHOD(std::optional, key, (), (const, override)); MOCK_METHOD(std::optional, value, (), (const, override)); - MOCK_METHOD(std::optional, - valueHash, - (), - (const, override)); }; } // namespace kagome::storage::trie diff --git a/test/mock/core/storage/trie/trie_storage_backend_mock.hpp b/test/mock/core/storage/trie/trie_storage_backend_mock.hpp index a8b74a420e..c27288b9f7 100644 --- a/test/mock/core/storage/trie/trie_storage_backend_mock.hpp +++ b/test/mock/core/storage/trie/trie_storage_backend_mock.hpp @@ -14,9 +14,37 @@ namespace kagome::storage::trie { class TrieStorageBackendMock : public TrieStorageBackend { 
public: - MOCK_METHOD(BufferStorage &, nodes, (), (override)); - MOCK_METHOD(BufferStorage &, values, (), (override)); - MOCK_METHOD(std::unique_ptr, batch, (), (override)); + MOCK_METHOD(std::unique_ptr, batch, (), (override)); + + MOCK_METHOD((std::unique_ptr>), + cursor, + (), + (override)); + + MOCK_METHOD(outcome::result, + get, + (const BufferView &key), + (const, override)); + + MOCK_METHOD(outcome::result>, + tryGet, + (const BufferView &key), + (const, override)); + + MOCK_METHOD(outcome::result, + contains, + (const BufferView &key), + (const, override)); + + MOCK_METHOD(outcome::result, + put, + (const BufferView &key, BufferOrView &&value), + (override)); + + MOCK_METHOD(outcome::result, + remove, + (const BufferView &key), + (override)); }; } // namespace kagome::storage::trie diff --git a/test/mock/core/storage/write_batch_mock.hpp b/test/mock/core/storage/write_batch_mock.hpp index eb3b4d1e16..ba844d2aed 100644 --- a/test/mock/core/storage/write_batch_mock.hpp +++ b/test/mock/core/storage/write_batch_mock.hpp @@ -29,25 +29,4 @@ namespace kagome::storage::face { MOCK_METHOD1_T(remove, outcome::result(const View &key)); }; - template - class SpacedBatchMock : public SpacedBatch { - public: - MOCK_METHOD(outcome::result, commit, (), (override)); - - MOCK_METHOD(void, clear, (), (override)); - - MOCK_METHOD3_T(put, - outcome::result(Space space, - const View &key, - const V &value)); - outcome::result put(Space space, - const View &key, - OwnedOrView &&value) override { - return put(space, key, value.mut()); - } - - MOCK_METHOD2_T(remove, - outcome::result(Space space, const View &key)); - }; - } // namespace kagome::storage::face diff --git a/test/mock/libp2p/crypto/crypto_provider.hpp b/test/mock/libp2p/crypto/crypto_provider.hpp new file mode 100644 index 0000000000..b04624bf22 --- /dev/null +++ b/test/mock/libp2p/crypto/crypto_provider.hpp @@ -0,0 +1,45 @@ +/** + * Copyright Quadrivium LLC + * All Rights Reserved + * SPDX-License-Identifier: 
Apache-2.0 + */ + +#pragma once + +#include + +#include + +namespace libp2p::crypto { + class CryptoProviderMock : public CryptoProvider { + public: + MOCK_METHOD(outcome::result, + generateKeys, + (Key::Type, common::RSAKeyType), + (const, override)); + + MOCK_METHOD(outcome::result, + derivePublicKey, + (const PrivateKey &), + (const, override)); + + MOCK_METHOD(outcome::result, + sign, + (BytesIn, const PrivateKey &), + (const, override)); + + MOCK_METHOD(outcome::result, + verify, + (BytesIn, BytesIn, const PublicKey &), + (const, override)); + MOCK_METHOD(outcome::result, + generateEphemeralKeyPair, + (common::CurveType), + (const, override)); + + MOCK_METHOD((outcome::result>), + stretchKey, + (common::CipherType, common::HashType, const Buffer &), + (const, override)); + }; +} // namespace libp2p::crypto diff --git a/test/testutil/storage/CMakeLists.txt b/test/testutil/storage/CMakeLists.txt index cd4fe10299..0c89e8b758 100644 --- a/test/testutil/storage/CMakeLists.txt +++ b/test/testutil/storage/CMakeLists.txt @@ -31,10 +31,3 @@ add_library(std_list_adapter INTERFACE) target_link_libraries(std_list_adapter INTERFACE outcome ) - -add_library(in_memory_storage in_memory/in_memory_storage.cpp) -target_link_libraries(in_memory_storage PUBLIC - outcome - blob - storage - ) diff --git a/test/testutil/storage/base_rocksdb_test.cpp b/test/testutil/storage/base_rocksdb_test.cpp index d5c3b38565..497151ea5c 100644 --- a/test/testutil/storage/base_rocksdb_test.cpp +++ b/test/testutil/storage/base_rocksdb_test.cpp @@ -12,7 +12,7 @@ namespace test { rocksdb::Options options; options.create_if_missing = true; - auto r = RocksDB::create(base_path / "rocksdb", options); + auto r = RocksDB::create(getPathString(), options); rocks_ = std::move(r.value()); db_ = rocks_->getSpace(kagome::storage::Space::kDefault); ASSERT_TRUE(rocks_) << "BaseRocksDB_Test: db is nullptr"; diff --git a/test/testutil/storage/base_rocksdb_test.hpp b/test/testutil/storage/base_rocksdb_test.hpp 
index ddcfe9e055..88fe98740f 100644 --- a/test/testutil/storage/base_rocksdb_test.hpp +++ b/test/testutil/storage/base_rocksdb_test.hpp @@ -6,7 +6,6 @@ #pragma once -#include "storage/buffer_map_types.hpp" #include "testutil/storage/base_fs_test.hpp" #include "storage/rocksdb/rocksdb.hpp" @@ -25,7 +24,7 @@ namespace test { void TearDown() override; std::shared_ptr rocks_; - std::shared_ptr db_; + std::shared_ptr db_; }; } // namespace test diff --git a/test/testutil/storage/in_memory/in_memory_batch.hpp b/test/testutil/storage/in_memory/in_memory_batch.hpp deleted file mode 100644 index d9a02c8da2..0000000000 --- a/test/testutil/storage/in_memory/in_memory_batch.hpp +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Copyright Quadrivium LLC - * All Rights Reserved - * SPDX-License-Identifier: Apache-2.0 - */ - -#pragma once - -#include "common/buffer.hpp" -#include "storage/buffer_map_types.hpp" -#include "testutil/storage/in_memory/in_memory_spaced_storage.hpp" -#include "testutil/storage/in_memory/in_memory_storage.hpp" - -namespace kagome::storage { - using kagome::common::Buffer; - - class InMemoryBatch : public BufferBatch { - public: - explicit InMemoryBatch(InMemoryStorage &db) : db{db} {} - - outcome::result put(const BufferView &key, - BufferOrView &&value) override { - entries[key.toHex()] = std::move(value).intoBuffer(); - return outcome::success(); - } - - outcome::result remove(const BufferView &key) override { - entries.erase(key.toHex()); - return outcome::success(); - } - - outcome::result commit() override { - for (auto &entry : entries) { - OUTCOME_TRY(db.put(Buffer::fromHex(entry.first).value(), - BufferView{entry.second})); - } - return outcome::success(); - } - - void clear() override { - entries.clear(); - } - - private: - std::map entries; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) - InMemoryStorage &db; - }; - - class InMemorySpacedBatch : public BufferSpacedBatch { - public: - explicit InMemorySpacedBatch(SpacedStorage &db) 
: db{db} {} - - outcome::result put(Space space, - const BufferView &key, - BufferOrView &&value) override { - entries[std::make_pair(space, key.toHex())] = - std::move(value).intoBuffer(); - return outcome::success(); - } - - outcome::result remove(Space space, const BufferView &key) override { - entries.erase(std::make_pair(space, key.toHex())); - return outcome::success(); - } - - outcome::result commit() override { - for (auto &[key, entry] : entries) { - OUTCOME_TRY(db.getSpace(key.first)->put( - Buffer::fromHex(key.second).value(), BufferView{entry})); - } - return outcome::success(); - } - - void clear() override { - entries.clear(); - } - - private: - std::map, Buffer> entries; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) - SpacedStorage &db; - }; -} // namespace kagome::storage