From c28cc292beaccb8a8ad4300acf9808edabeef9f1 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 31 Oct 2024 14:10:12 -0700 Subject: [PATCH] wallet2: more performant tree building --- src/blockchain_db/CMakeLists.txt | 1 + src/blockchain_db/blockchain_db.cpp | 3 +- src/blockchain_db/blockchain_db_utils.cpp | 27 +- src/blockchain_db/blockchain_db_utils.h | 11 +- src/cryptonote_basic/account.cpp | 4 +- src/fcmp_pp/curve_trees.cpp | 209 ++++++---- src/fcmp_pp/curve_trees.h | 16 +- src/fcmp_pp/tree_sync_memory.cpp | 439 +++++++++++++++++----- src/fcmp_pp/tree_sync_memory.h | 20 + src/wallet/CMakeLists.txt | 1 + src/wallet/wallet2.cpp | 188 ++++++++- src/wallet/wallet2.h | 3 + 12 files changed, 744 insertions(+), 178 deletions(-) diff --git a/src/blockchain_db/CMakeLists.txt b/src/blockchain_db/CMakeLists.txt index cc677e8d79c..0d4ca2b84dc 100644 --- a/src/blockchain_db/CMakeLists.txt +++ b/src/blockchain_db/CMakeLists.txt @@ -34,6 +34,7 @@ target_link_libraries(blockchain_db_utils PUBLIC cncrypto cryptonote_basic + epee ringct ) diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index 3ced2ddc1ad..fc1533a2d18 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -303,8 +303,7 @@ uint64_t BlockchainDB::add_block( const std::pair& blck // When adding a block, we also need to keep track of when outputs unlock, so // we can use them to grow the merkle tree used in fcmp's at that point. - fcmp_pp::curve_trees::OutputsByUnlockBlock outs_by_unlock_block; - cryptonote::get_outs_by_unlock_block(blk.miner_tx, _txs, total_n_outputs, prev_height, outs_by_unlock_block); + const auto outs_by_unlock_block = cryptonote::get_outs_by_unlock_block(blk.miner_tx, _txs, total_n_outputs, prev_height).first; // call out to subclass implementation to add the block & metadata time1 = epee::misc_utils::get_tick_count(); diff --git a/src/blockchain_db/blockchain_db_utils.cpp b/src/blockchain_db/blockchain_db_utils.cpp index 0649c20858b..cdfa3559626 100644 --- a/src/blockchain_db/blockchain_db_utils.cpp +++ b/src/blockchain_db/blockchain_db_utils.cpp @@ -32,6 +32,8 @@ #include "cryptonote_basic/cryptonote_format_utils.h" #include "ringct/rctOps.h" +#include "profile_tools.h" + //---------------------------------------------------------------------------------------------------------------------- // Helper function to group outputs by unlock block static uint64_t set_tx_outs_by_unlock_block(const cryptonote::transaction &tx, @@ -42,6 +44,9 @@ static uint64_t set_tx_outs_by_unlock_block(const cryptonote::transaction &tx, { const uint64_t unlock_block = cryptonote::get_unlock_block_index(tx.unlock_time, block_idx); + uint64_t getting_commitment_ns = 0; + uint64_t setting_unlock_block_ns = 0; + for (std::size_t i = 0; i < tx.vout.size(); ++i) { const auto &out = tx.vout[i]; @@ -57,10 +62,14 @@ static uint64_t set_tx_outs_by_unlock_block(const cryptonote::transaction &tx, if (!miner_tx && tx.version == 2) CHECK_AND_ASSERT_THROW_MES(tx.rct_signatures.outPk.size() > i, "unexpected size of outPk"); + TIME_MEASURE_NS_START(getting_commitment); + rct::key commitment = (miner_tx || tx.version < 2) ? 
rct::zeroCommitVartime(out.amount) : tx.rct_signatures.outPk[i].mask; + TIME_MEASURE_NS_FINISH(getting_commitment); + auto output_pair = fcmp_pp::curve_trees::OutputPair{ .output_pubkey = std::move(output_public_key), .commitment = std::move(commitment) @@ -71,6 +80,8 @@ static uint64_t set_tx_outs_by_unlock_block(const cryptonote::transaction &tx, .output_pair = std::move(output_pair) }; + TIME_MEASURE_NS_START(setting_unlock_block); + if (outs_by_unlock_block_inout.find(unlock_block) == outs_by_unlock_block_inout.end()) { auto new_vec = std::vector<fcmp_pp::curve_trees::OutputContext>{std::move(output_context)}; @@ -80,22 +91,28 @@ static uint64_t set_tx_outs_by_unlock_block(const cryptonote::transaction &tx, { outs_by_unlock_block_inout[unlock_block].emplace_back(std::move(output_context)); } + + TIME_MEASURE_NS_FINISH(setting_unlock_block); + + getting_commitment_ns += getting_commitment; + setting_unlock_block_ns += setting_unlock_block; } + LOG_PRINT_L3("getting_commitment_us: " << getting_commitment_ns / 1000 << " , setting_unlock_block_us: " << setting_unlock_block_ns / 1000); + return tx.vout.size(); } //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- namespace cryptonote { -uint64_t get_outs_by_unlock_block( +std::pair<fcmp_pp::curve_trees::OutputsByUnlockBlock, uint64_t> get_outs_by_unlock_block( const cryptonote::transaction &miner_tx, const std::vector<std::reference_wrapper<const cryptonote::transaction>> &txs, const uint64_t first_output_id, - const uint64_t block_idx, - fcmp_pp::curve_trees::OutputsByUnlockBlock &outs_by_unlock_block_out) + const uint64_t block_idx) { - outs_by_unlock_block_out.clear(); + fcmp_pp::curve_trees::OutputsByUnlockBlock outs_by_unlock_block_out; uint64_t output_id = first_output_id; @@ -118,7 +135,7 @@ uint64_t get_outs_by_unlock_block( outs_by_unlock_block_out); } - return output_id; + return { std::move(outs_by_unlock_block_out), output_id }; } //---------------------------------------------------------------------------------------------------------------------- }//namespace cryptonote diff --git a/src/blockchain_db/blockchain_db_utils.h b/src/blockchain_db/blockchain_db_utils.h index 34ffd9dca95..a06f6a1b763 100644 --- a/src/blockchain_db/blockchain_db_utils.h +++ b/src/blockchain_db/blockchain_db_utils.h @@ -32,17 +32,18 @@ #include "fcmp_pp/curve_trees.h" #include +#include <utility> #include namespace cryptonote { -// This function internally relies on ringct for zeroCommit. I implemented in this blockchain_db_utils file instead of -// cryptonote_basic (where it would seem the better place to put it) to avoid a circular dependency between +// These functions internally rely on ringct for zeroCommitVartime. I implemented it in this blockchain_db_utils file +// instead of cryptonote_basic (where it would seem the better place to put it) to avoid a circular dependency between // ringct <> cryptonote_basic. -uint64_t get_outs_by_unlock_block( +// Note that zeroCommitVartime causes these functions to execute slowly.
+std::pair<fcmp_pp::curve_trees::OutputsByUnlockBlock, uint64_t> get_outs_by_unlock_block( const cryptonote::transaction &miner_tx, const std::vector<std::reference_wrapper<const cryptonote::transaction>> &txs, const uint64_t first_output_id, - const uint64_t block_idx, - fcmp_pp::curve_trees::OutputsByUnlockBlock &outs_by_unlock_out); + const uint64_t block_idx); } diff --git a/src/cryptonote_basic/account.cpp index 686796ce497..3ba5638bfa9 100644 --- a/src/cryptonote_basic/account.cpp +++ b/src/cryptonote_basic/account.cpp @@ -175,8 +175,8 @@ DISABLE_VS_WARNINGS(4244 4345) struct tm timestamp = {0}; timestamp.tm_year = 2014 - 1900; // year 2014 - timestamp.tm_mon = 6 - 1; // month june - timestamp.tm_mday = 8; // 8th of june + timestamp.tm_mon = 4 - 1; // month april + timestamp.tm_mday = 15; // 15th of april timestamp.tm_hour = 0; timestamp.tm_min = 0; timestamp.tm_sec = 0; diff --git a/src/fcmp_pp/curve_trees.cpp index 578513a4bfe..12404d1d07d 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -29,6 +29,7 @@ #include "curve_trees.h" #include "common/threadpool.h" +#include "profile_tools.h" #include "ringct/rctOps.h" #include @@ -68,23 +69,33 @@ OutputTuple output_to_tuple(const OutputPair &output_pair) const crypto::public_key &output_pubkey = output_pair.output_pubkey; const rct::key &commitment = output_pair.commitment; + TIME_MEASURE_NS_START(clear_torsion_ns); + rct::key O, C; if (!fcmp_pp::clear_torsion(rct::pk2rct(output_pubkey), O)) throw std::runtime_error("output pubkey is invalid"); if (!fcmp_pp::clear_torsion(commitment, C)) throw std::runtime_error("commitment is invalid"); + TIME_MEASURE_NS_FINISH(clear_torsion_ns); + if (O == rct::I) throw std::runtime_error("O cannot equal identity"); if (C == rct::I) throw std::runtime_error("C cannot equal identity"); + TIME_MEASURE_NS_START(derive_key_image_generator_ns); + // Must use the original output pubkey to derive I to prevent double spends, since torsioned outputs yield a // a distinct I and key image from their respective torsion cleared output (and torsioned outputs are spendable // before fcmp++) crypto::ec_point I; crypto::derive_key_image_generator(output_pubkey, I); + TIME_MEASURE_NS_FINISH(derive_key_image_generator_ns); + + LOG_PRINT_L3("clear_torsion_ns: " << clear_torsion_ns << " , derive_key_image_generator_ns: " << derive_key_image_generator_ns); + rct::key I_rct = rct::pt2rct(I); return OutputTuple{ @@ -222,82 +233,78 @@ static LayerExtension<C> hash_children_chunks(const std::unique_ptr<C> &curve, CHECK_AND_ASSERT_THROW_MES(chunk_width > start_offset, "start_offset must be smaller than chunk_width"); // See how many children we need to fill up the existing last chunk - std::size_t chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset); + const std::size_t first_chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset); - CHECK_AND_ASSERT_THROW_MES(new_child_scalars.size() >= chunk_size, "unexpected first chunk size"); + CHECK_AND_ASSERT_THROW_MES(new_child_scalars.size() >= first_chunk_size, "unexpected first chunk size"); const std::size_t n_chunks = 1 // first chunk - + (new_child_scalars.size() - chunk_size) / chunk_width // middle chunks - + (((new_child_scalars.size() - chunk_size) % chunk_width > 0) ? 1 : 0); // final chunk + + (new_child_scalars.size() - first_chunk_size) / chunk_width // middle chunks + + (((new_child_scalars.size() - first_chunk_size) % chunk_width > 0) ?
1 : 0); // final chunk parents_out.hashes.resize(n_chunks); - MDEBUG("First chunk_size: " << chunk_size << " , num new child scalars: " << new_child_scalars.size() - << " , start_offset: " << start_offset << " , parent layer start idx: " << parents_out.start_idx); + MDEBUG("First chunk_size: " << first_chunk_size + << " , num new child scalars: " << new_child_scalars.size() + << " , start_offset: " << start_offset + << " , parent layer start idx: " << parents_out.start_idx + << " , n chunks: " << n_chunks); - // Hash all chunks in parallel + // Hash batches of chunks in parallel tools::threadpool& tpool = tools::threadpool::getInstanceForCompute(); tools::threadpool::waiter waiter(tpool); - // Hash the first chunk - tpool.submit(&waiter, - [ - &curve, - &old_last_child, - &old_last_parent, - &new_child_scalars, - &parents_out, - start_offset, - chunk_size - ]() - { - auto &hash_out = parents_out.hashes[0]; - hash_first_chunk(curve, - old_last_child, - old_last_parent, - start_offset, - new_child_scalars, - chunk_size, - hash_out); - }, - true - ); - - // Hash chunks of child scalars to create the parent hashes - std::size_t chunk_start_idx = chunk_size; - std::size_t chunk_idx = 1; - while (chunk_start_idx < new_child_scalars.size()) + const std::size_t HASH_BATCH_SIZE = 1 + (n_chunks / (std::size_t)tpool.get_max_concurrency()); + for (std::size_t i = 0; i < n_chunks; i += HASH_BATCH_SIZE) { - // Fill a complete chunk, or add the remaining new children to the last chunk - chunk_size = std::min(chunk_width, new_child_scalars.size() - chunk_start_idx); - - CHECK_AND_ASSERT_THROW_MES(chunk_idx < parents_out.hashes.size(), "unexpected chunk_idx"); - + const std::size_t end = std::min(i + HASH_BATCH_SIZE, n_chunks); tpool.submit(&waiter, [ &curve, + &old_last_child, + &old_last_parent, &new_child_scalars, &parents_out, - chunk_start_idx, - chunk_size, - chunk_idx + start_offset, + first_chunk_size, + chunk_width, + i, + end ]() { - auto &hash_out = parents_out.hashes[chunk_idx]; - hash_next_chunk(curve, chunk_start_idx, new_child_scalars, chunk_size, hash_out); + for (std::size_t j = i; j < end; ++j) + { + auto &hash_out = parents_out.hashes[j]; + + // Hash the first chunk + if (j == 0) + { + hash_first_chunk(curve, + old_last_child, + old_last_parent, + start_offset, + new_child_scalars, + first_chunk_size, + hash_out); + continue; + } + + const std::size_t chunk_start = j * chunk_width; + + CHECK_AND_ASSERT_THROW_MES(chunk_start > start_offset, "unexpected small chunk_start"); + const std::size_t chunk_start_idx = chunk_start - start_offset; + + const std::size_t chunk_end_idx = std::min(chunk_start_idx + chunk_width, new_child_scalars.size()); + + CHECK_AND_ASSERT_THROW_MES(chunk_end_idx > chunk_start_idx, "unexpected large chunk_start_idx"); + const std::size_t chunk_size = chunk_end_idx - chunk_start_idx; + + hash_next_chunk(curve, chunk_start_idx, new_child_scalars, chunk_size, hash_out); + } }, true ); - - // Advance to the next chunk - chunk_start_idx += chunk_size; - - CHECK_AND_ASSERT_THROW_MES(chunk_start_idx <= new_child_scalars.size(), "unexpected chunk start idx"); - - ++chunk_idx; } - CHECK_AND_ASSERT_THROW_MES(chunk_idx == n_chunks, "unexpected n chunks"); CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to hash chunks"); return parents_out; @@ -784,6 +791,8 @@ static typename fcmp_pp::curve_trees::LayerReduction get_next_layer_re //---------------------------------------------------------------------------------------------------------------------- static PreLeafTuple 
output_tuple_to_pre_leaf_tuple(const OutputTuple &o) { + TIME_MEASURE_NS_START(point_to_ed_y_derivatives_ns); + PreLeafTuple plt; if (!fcmp_pp::point_to_ed_y_derivatives(o.O, plt.O_pre_x)) throw std::runtime_error("failed to get ed y derivatives from O"); @@ -792,6 +801,10 @@ static PreLeafTuple output_tuple_to_pre_leaf_tuple(const OutputTuple &o) if (!fcmp_pp::point_to_ed_y_derivatives(o.C, plt.C_pre_x)) throw std::runtime_error("failed to get ed y derivatives from C"); + TIME_MEASURE_NS_FINISH(point_to_ed_y_derivatives_ns); + + LOG_PRINT_L3("point_to_ed_y_derivatives_ns: " << point_to_ed_y_derivatives_ns); + return plt; } //---------------------------------------------------------------------------------------------------------------------- @@ -855,7 +868,7 @@ template typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - std::vector &&new_outputs) const + std::vector &&new_outputs) { TreeExtension tree_extension; tree_extension.leaves.start_leaf_tuple_idx = old_n_leaf_tuples; @@ -863,10 +876,14 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio if (new_outputs.empty()) return tree_extension; + TIME_MEASURE_START(sorting_outputs); + // Sort the outputs by order they appear in the chain const auto sort_fn = [](const OutputContext &a, const OutputContext &b) { return a.output_id < b.output_id; }; std::sort(new_outputs.begin(), new_outputs.end(), sort_fn); + TIME_MEASURE_FINISH(sorting_outputs); + // Convert sorted outputs into leaf tuples, place each element of each leaf tuple in a flat vector to be hashed, // and place the outputs in a tree extension struct for insertion into the db. We ignore invalid outputs, since // they cannot be inserted to the tree. 
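Note on the batched submission pattern used above (HASH_BATCH_SIZE, and the LEAF_CONVERT_BATCH_SIZE / DERIVATION_BATCH_SIZE loops further down): giving each task a contiguous slice of roughly n / max_concurrency items amortizes threadpool overhead that one-task-per-chunk submission pays per chunk. A minimal standalone sketch of the pattern, using std::thread as a stand-in for tools::threadpool; the helper name and scaffolding are illustrative, not code from this patch:

    #include <algorithm>
    #include <cstddef>
    #include <thread>
    #include <vector>

    // Run fn(j) for every j in [0, n_items), splitting the range into roughly
    // one contiguous batch per worker. Each index is visited exactly once, so
    // fn needs no locking as long as it only touches per-index state.
    template <typename F>
    void for_each_batched(const std::size_t n_items, const std::size_t n_workers, F &&fn)
    {
        const std::size_t batch_size = 1 + (n_items / std::max<std::size_t>(1, n_workers));
        std::vector<std::thread> threads;
        for (std::size_t i = 0; i < n_items; i += batch_size)
        {
            const std::size_t end = std::min(i + batch_size, n_items);
            threads.emplace_back([&fn, i, end]() {
                for (std::size_t j = i; j < end; ++j)
                    fn(j);
            });
        }
        for (auto &t : threads)
            t.join();
    }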
@@ -876,6 +893,8 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio if (flattened_leaves.empty()) return tree_extension; + TIME_MEASURE_START(hashing_leaves); + MDEBUG("Getting extension for layer 0"); auto grow_layer_instructions = get_leaf_layer_grow_instructions( old_n_leaf_tuples, @@ -895,6 +914,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio flattened_leaves, m_leaf_layer_chunk_width ); + TIME_MEASURE_FINISH(hashing_leaves); CHECK_AND_ASSERT_THROW_MES( (leaf_parents.start_idx + leaf_parents.hashes.size()) == grow_layer_instructions.new_total_parents, @@ -907,6 +927,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio std::size_t c1_last_idx = 0; std::size_t c2_last_idx = 0; + TIME_MEASURE_START(hashing_layers); while (grow_layer_instructions.new_total_parents > 1) { MDEBUG("Getting extension for layer " << (c1_last_idx + c2_last_idx + 1)); @@ -928,6 +949,15 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio parent_is_c1 = !parent_is_c1; } + TIME_MEASURE_FINISH(hashing_layers); + + m_sorting_outputs_ms += sorting_outputs; + m_hash_leaves_ms += hashing_leaves; + m_hash_layers_ms += hashing_layers; + + LOG_PRINT_L1("Total time spent hashing leaves: " << m_hash_leaves_ms / 1000 + << " , hashing layers: " << m_hash_layers_ms / 1000 + << " , sorting outputs: " << m_sorting_outputs_ms / 1000); return tree_extension; }; @@ -936,7 +966,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio template CurveTrees::TreeExtension CurveTrees::get_tree_extension( const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - std::vector &&new_outputs) const; + std::vector &&new_outputs); //---------------------------------------------------------------------------------------------------------------------- template std::vector CurveTrees::get_trim_instructions( @@ -1305,8 +1335,10 @@ template void CurveTrees::set_valid_leaves( std::vector &flattened_leaves_out, std::vector &tuples_out, - std::vector &&new_outputs) const + std::vector &&new_outputs) { + TIME_MEASURE_START(set_valid_leaves); + // Keep track of valid outputs to make sure we only use leaves from valid outputs. Can't use std::vector // because std::vector concurrent access is not thread safe. enum Boolean : uint8_t { @@ -1318,37 +1350,45 @@ void CurveTrees::set_valid_leaves( tools::threadpool& tpool = tools::threadpool::getInstanceForCompute(); tools::threadpool::waiter waiter(tpool); + TIME_MEASURE_START(convert_valid_leaves); // Step 1. 
Multithreaded convert valid outputs into Edwards y derivatives needed to get Wei x coordinates - // TODO: investigate batched threading (as opposed to small tasks) std::vector pre_leaves; pre_leaves.resize(new_outputs.size()); - for (std::size_t i = 0; i < new_outputs.size(); ++i) + const std::size_t LEAF_CONVERT_BATCH_SIZE = 1 + (new_outputs.size() / (std::size_t)tpool.get_max_concurrency()); + for (std::size_t i = 0; i < new_outputs.size(); i += LEAF_CONVERT_BATCH_SIZE) { + const std::size_t end = std::min(i + LEAF_CONVERT_BATCH_SIZE, new_outputs.size()); tpool.submit(&waiter, [ &new_outputs, &valid_outputs, &pre_leaves, - i + i, + end ]() { - CHECK_AND_ASSERT_THROW_MES(valid_outputs.size() > i, "unexpected valid outputs size"); - CHECK_AND_ASSERT_THROW_MES(!valid_outputs[i], "unexpected valid output"); - CHECK_AND_ASSERT_THROW_MES(pre_leaves.size() > i, "unexpected pre_leaves size"); + for (std::size_t j = i; j < end; ++j) + { + CHECK_AND_ASSERT_THROW_MES(valid_outputs.size() > j, "unexpected valid outputs size"); + CHECK_AND_ASSERT_THROW_MES(!valid_outputs[j], "unexpected valid output"); + CHECK_AND_ASSERT_THROW_MES(pre_leaves.size() > j, "unexpected pre_leaves size"); - const auto &output_pair = new_outputs[i].output_pair; + const auto &output_pair = new_outputs[j].output_pair; - try { pre_leaves[i] = output_to_pre_leaf_tuple(output_pair); } - catch(...) { /* Invalid outputs can't be added to the tree */ return; } + try { pre_leaves[j] = output_to_pre_leaf_tuple(output_pair); } + catch(...) { /* Invalid outputs can't be added to the tree */ return; } - valid_outputs[i] = True; + valid_outputs[j] = True; + } }, true ); } CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to ed y derivatives"); + TIME_MEASURE_FINISH(convert_valid_leaves); + TIME_MEASURE_START(collect_derivatives); // Step 2. Collect valid Edwards y derivatives const std::size_t n_valid_outputs = std::count(valid_outputs.begin(), valid_outputs.end(), True); const std::size_t n_valid_leaf_elems = n_valid_outputs * LEAF_TUPLE_SIZE; @@ -1391,36 +1431,46 @@ void CurveTrees::set_valid_leaves( } CHECK_AND_ASSERT_THROW_MES(n_valid_leaf_elems == valid_i, "unexpected end valid_i"); + TIME_MEASURE_FINISH(collect_derivatives); + TIME_MEASURE_START(batch_invert); // Step 3. Get batch inverse of all valid (1-y)'s // - Batch inversion is significantly faster than inverting 1 at a time fe *inv_one_minus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe)); CHECK_AND_ASSERT_THROW_MES(inv_one_minus_y_vec, "failed malloc inv_one_minus_y_vec"); CHECK_AND_ASSERT_THROW_MES(fe_batch_invert(inv_one_minus_y_vec, one_minus_y_vec, n_valid_leaf_elems) == 0, "failed to batch invert"); + TIME_MEASURE_FINISH(batch_invert); + TIME_MEASURE_START(get_selene_scalars); // Step 4. 
Multithreaded get Wei x's and convert to Selene scalars - // TODO: investigate batched threading (as opposed to small tasks) flattened_leaves_out.resize(n_valid_leaf_elems); - for (std::size_t i = 0; i < n_valid_leaf_elems; ++i) + const std::size_t DERIVATION_BATCH_SIZE = 1 + (n_valid_leaf_elems / (std::size_t)tpool.get_max_concurrency()); + for (std::size_t i = 0; i < n_valid_leaf_elems; i += DERIVATION_BATCH_SIZE) { + const std::size_t end = std::min(n_valid_leaf_elems, i + DERIVATION_BATCH_SIZE); tpool.submit(&waiter, [ &inv_one_minus_y_vec, &one_plus_y_vec, &flattened_leaves_out, - i + i, + end ]() { - rct::key wei_x; - fe_ed_y_derivatives_to_wei_x(wei_x.bytes, inv_one_minus_y_vec[i], one_plus_y_vec[i]); - flattened_leaves_out[i] = tower_cycle::selene_scalar_from_bytes(wei_x); + for (std::size_t j = i; j < end; ++j) + { + rct::key wei_x; + fe_ed_y_derivatives_to_wei_x(wei_x.bytes, inv_one_minus_y_vec[j], one_plus_y_vec[j]); + flattened_leaves_out[j] = tower_cycle::selene_scalar_from_bytes(wei_x); + } }, true ); } CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to wei x coords"); + TIME_MEASURE_FINISH(get_selene_scalars); // Step 5. Set valid tuples to be stored in the db tuples_out.clear(); @@ -1440,6 +1490,21 @@ void CurveTrees::set_valid_leaves( free(one_plus_y_vec); free(one_minus_y_vec); free(inv_one_minus_y_vec); + + TIME_MEASURE_FINISH(set_valid_leaves); + + m_convert_valid_leaves_ms += convert_valid_leaves; + m_collect_derivatives_ms += collect_derivatives; + m_batch_invert_ms += batch_invert; + m_get_selene_scalars_ms += get_selene_scalars; + + m_set_valid_leaves_ms += set_valid_leaves; + + LOG_PRINT_L1("Total time spent setting leaves: " << m_set_valid_leaves_ms / 1000 + << " , converting valid leaves: " << m_convert_valid_leaves_ms / 1000 + << " , collecting derivatives: " << m_collect_derivatives_ms / 1000 + << " , batch invert: " << m_batch_invert_ms / 1000 + << " , get selene scalars: " << m_get_selene_scalars_ms / 1000); } //---------------------------------------------------------------------------------------------------------------------- template diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h index be0218f9791..d9d54616c84 100644 --- a/src/fcmp_pp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -339,7 +339,7 @@ class CurveTrees // outputs to add to the tree, and return a tree extension struct that can be used to extend a tree TreeExtension get_tree_extension(const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - std::vector &&new_leaf_tuples) const; + std::vector &&new_leaf_tuples); // Get instructions useful for trimming all existing layers in the tree // - always_regrow_with_remaining will use hash_grow with remaining elems left in a chunk to "trim" every chunk, @@ -373,7 +373,7 @@ class CurveTrees void set_valid_leaves( std::vector &flattened_leaves_out, std::vector &tuples_out, - std::vector &&new_outputs) const; + std::vector &&new_outputs); // Helper function used to set the next layer extension used to grow the next layer in the tree // - for example, if we just grew the parent layer after the leaf layer, the "next layer" would be the grandparent @@ -386,6 +386,18 @@ class CurveTrees std::size_t &c2_last_idx_inout, TreeExtension &tree_extension_inout) const; +//private state +private: + uint64_t m_set_valid_leaves_ms{0}; + uint64_t m_get_selene_scalars_ms{0}; + uint64_t m_batch_invert_ms{0}; + uint64_t m_collect_derivatives_ms{0}; + uint64_t m_convert_valid_leaves_ms{0}; + + uint64_t 
m_sorting_outputs_ms{0}; + uint64_t m_hash_leaves_ms{0}; + uint64_t m_hash_layers_ms{0}; + //public member variables public: // The curve interfaces diff --git a/src/fcmp_pp/tree_sync_memory.cpp b/src/fcmp_pp/tree_sync_memory.cpp index e60ea7ad15d..7edef28db58 100644 --- a/src/fcmp_pp/tree_sync_memory.cpp +++ b/src/fcmp_pp/tree_sync_memory.cpp @@ -29,6 +29,7 @@ #include "tree_sync_memory.h" #include "misc_log_ex.h" +#include "profile_tools.h" #include "string_tools.h" @@ -65,7 +66,8 @@ static void assign_new_output(const OutputPair &output_pair, if (registered_output_it->second.assigned_leaf_idx) return; - MDEBUG("Starting to keep track of leaf_idx: " << leaf_idx); + LOG_PRINT_L1("Found output " << output_pair.output_pubkey << " in curve tree at leaf idx " << leaf_idx); + registered_output_it->second.assign_leaf(leaf_idx); return; @@ -156,13 +158,32 @@ static uint64_t remove_outputs_created_at_block(const CreatedBlockIdx &created_b } //---------------------------------------------------------------------------------------------------------------------- template +static void assert_tuple_slice_is_in_bounds(const typename CurveTrees::Leaves &leaves, + const uint64_t start_leaf_tuple_idx, + const uint64_t n_leaf_tuples) +{ + CHECK_AND_ASSERT_THROW_MES(start_leaf_tuple_idx >= leaves.start_leaf_tuple_idx, "start_leaf_tuple_idx too low"); + + const uint64_t n_leaf_tuples_ext = leaves.start_leaf_tuple_idx + leaves.tuples.size(); + CHECK_AND_ASSERT_THROW_MES(n_leaf_tuples_ext >= n_leaf_tuples, "n_leaf_tuples is larger than leaves extension"); + + CHECK_AND_ASSERT_THROW_MES(n_leaf_tuples >= start_leaf_tuple_idx, + "total n leaf tuples must be > start leaf tuple idx"); + + const uint64_t tuple_slice_size = n_leaf_tuples - start_leaf_tuple_idx; + CHECK_AND_ASSERT_THROW_MES(leaves.tuples.size() >= tuple_slice_size, "tuple slice size is too large"); +} +//---------------------------------------------------------------------------------------------------------------------- +template static void cache_leaf_chunk(const ChildChunkIdx chunk_idx, const std::size_t leaf_parent_chunk_width, const typename CurveTrees::Leaves &leaves, + const LeafIdx start_leaf_tuple_idx, + const uint64_t n_leaf_tuples, const bool bump_ref_count, LeafCache &leaf_cache_inout) { - const uint64_t n_leaf_tuples = leaves.start_leaf_tuple_idx + leaves.tuples.size(); + assert_tuple_slice_is_in_bounds(leaves, start_leaf_tuple_idx, n_leaf_tuples); if (n_leaf_tuples == 0) return; @@ -175,10 +196,10 @@ static void cache_leaf_chunk(const ChildChunkIdx chunk_idx, << " , start_leaf_idx: " << start_leaf_idx << " , end_leaf_idx: " << end_leaf_idx << " , bump_ref_count: " << bump_ref_count - << " , start_leaf_tuple_idx: " << leaves.start_leaf_tuple_idx); + << " , start_leaf_tuple_idx: " << start_leaf_tuple_idx); // If the leaf's chunk isn't present in this leaf extension, there are no new leaves we need to cache - if (leaves.start_leaf_tuple_idx >= end_leaf_idx) + if (start_leaf_tuple_idx >= end_leaf_idx) return; // Check if the leaf's chunk is already cached @@ -203,7 +224,7 @@ static void cache_leaf_chunk(const ChildChunkIdx chunk_idx, return; // Add the new leaves in the leaf's chunk to the cache - const LeafIdx end_tuple_idx = end_leaf_idx - leaves.start_leaf_tuple_idx; + const LeafIdx end_tuple_idx = end_leaf_idx - start_leaf_tuple_idx; CHECK_AND_ASSERT_THROW_MES(leaves.tuples.size() >= end_tuple_idx, "high end_tuple_idx"); CHECK_AND_ASSERT_THROW_MES(end_tuple_idx >= new_leaves_needed, "low end_tuple_idx"); const LeafIdx start_tuple_idx = 
end_tuple_idx - new_leaves_needed; @@ -229,15 +250,22 @@ static void cache_path_chunk(const std::unique_ptr &curve, const std::vector> &layer_exts, const std::size_t layer_ext_idx, const LayerIdx layer_idx, + const ChildChunkIdx layer_start_idx, const bool bump_ref_count, const ChildChunkIdx parent_idx, + const ChildChunkIdx last_parent_idx, TreeElemCache &cached_tree_elems_inout) { CHECK_AND_ASSERT_THROW_MES(layer_exts.size() > layer_ext_idx, "high layer_ext_idx"); auto &layer_ext = layer_exts[layer_ext_idx]; + CHECK_AND_ASSERT_THROW_MES(last_parent_idx >= parent_idx, "high parent_idx"); + const uint64_t n_layer_elems = last_parent_idx + 1; + CHECK_AND_ASSERT_THROW_MES(!layer_ext.hashes.empty(), "empty layer ext"); - const uint64_t n_layer_elems = layer_ext.start_idx + layer_ext.hashes.size(); + const uint64_t n_layer_elems_ext = layer_ext.start_idx + layer_ext.hashes.size(); + CHECK_AND_ASSERT_THROW_MES(n_layer_elems_ext >= n_layer_elems, "high last_parent_idx"); + CHECK_AND_ASSERT_THROW_MES(layer_start_idx >= layer_ext.start_idx, "low layer_start_idx"); const ChildChunkIdx start_chunk_idx = parent_idx * parent_width; const ChildChunkIdx end_chunk_idx = std::min(start_chunk_idx + parent_width, n_layer_elems); @@ -248,10 +276,10 @@ static void cache_path_chunk(const std::unique_ptr &curve, << " , start_chunk_idx: " << start_chunk_idx << " , end_chunk_idx: " << end_chunk_idx << " , bump_ref_count: " << bump_ref_count - << " , start_idx: " << layer_ext.start_idx); + << " , layer_start_idx: " << layer_start_idx); // If the chunk isn't in this tree extension at all, there are no elems we need to cache - if (layer_ext.start_idx >= end_chunk_idx) + if (layer_start_idx >= end_chunk_idx) return; // Check if the layer is already cached @@ -292,7 +320,7 @@ static void cache_path_chunk(const std::unique_ptr &curve, return; // Add the new elems in the chunk to the cache - const ChildChunkIdx end_i = end_chunk_idx - layer_ext.start_idx; + const ChildChunkIdx end_i = end_chunk_idx - layer_start_idx; CHECK_AND_ASSERT_THROW_MES(layer_ext.hashes.size() >= end_i, "high end_i"); CHECK_AND_ASSERT_THROW_MES(end_i >= new_elems_needed, "low end_i"); const ChildChunkIdx start_i = end_i - new_elems_needed; @@ -360,15 +388,28 @@ static void cache_path_chunks(const LeafIdx leaf_idx, const std::shared_ptr> &curve_trees, const std::vector> &c1_layer_exts, const std::vector> &c2_layer_exts, + const uint64_t start_leaf_tuple_idx, + const uint64_t n_leaf_tuples, const bool bump_ref_count, TreeElemCache &tree_elem_cache_inout) { + if (n_leaf_tuples == 0) + return; + + CHECK_AND_ASSERT_THROW_MES(n_leaf_tuples > leaf_idx, "high leaf_idx"); + + ChildChunkIdx layer_start_idx = start_leaf_tuple_idx / curve_trees->m_c2_width; + const ChildChunkIdx child_chunk_idx = leaf_idx / curve_trees->m_c2_width; ChildChunkIdx parent_idx = child_chunk_idx / curve_trees->m_c1_width; + const LeafIdx last_leaf_idx = n_leaf_tuples - 1; + const ChildChunkIdx last_chunk_idx = last_leaf_idx / curve_trees->m_c2_width; + ChildChunkIdx last_parent_idx = last_chunk_idx / curve_trees->m_c1_width; + std::size_t c1_idx = 0, c2_idx = 0; bool parent_is_c1 = true; - const std::size_t n_layers = c1_layer_exts.size() + c2_layer_exts.size(); + const std::size_t n_layers = curve_trees->n_layers(n_leaf_tuples); for (LayerIdx layer_idx = 0; layer_idx < n_layers; ++layer_idx) { MDEBUG("Caching tree elems from layer_idx " << layer_idx << " parent_idx " << parent_idx); @@ -379,12 +420,16 @@ static void cache_path_chunks(const LeafIdx leaf_idx, c2_layer_exts, 
c2_idx, layer_idx, + layer_start_idx, bump_ref_count, parent_idx, + last_parent_idx, tree_elem_cache_inout ); + layer_start_idx /= curve_trees->m_c1_width; parent_idx /= curve_trees->m_c2_width; + last_parent_idx /= curve_trees->m_c2_width; ++c2_idx; } else @@ -394,12 +439,16 @@ static void cache_path_chunks(const LeafIdx leaf_idx, c1_layer_exts, c1_idx, layer_idx, + layer_start_idx, bump_ref_count, parent_idx, + last_parent_idx, tree_elem_cache_inout ); + layer_start_idx /= curve_trees->m_c2_width; parent_idx /= curve_trees->m_c1_width; + last_parent_idx /= curve_trees->m_c1_width; ++c1_idx; } @@ -627,16 +676,24 @@ template static void update_registered_path(const std::shared_ptr> &curve_trees, const LeafIdx leaf_idx, const typename CurveTrees::TreeExtension &tree_extension, + const LeafIdx start_leaf_tuple_idx, + const uint64_t n_leaf_tuples, LeafCache &leaf_cache_inout, TreeElemCache &tree_elem_cach_inout) { + assert_tuple_slice_is_in_bounds(tree_extension.leaves, start_leaf_tuple_idx, n_leaf_tuples); + if (n_leaf_tuples == 0) + return; + // We only need to bump the ref count on this registered output's leaf chunk if it was just included in the tree - const bool bump_ref_count = leaf_idx >= tree_extension.leaves.start_leaf_tuple_idx; + const bool bump_ref_count = leaf_idx >= start_leaf_tuple_idx && leaf_idx < n_leaf_tuples; // Cache registered leaf's chunk cache_leaf_chunk(leaf_idx / curve_trees->m_c2_width, curve_trees->m_c2_width, tree_extension.leaves, + start_leaf_tuple_idx, + n_leaf_tuples, bump_ref_count, leaf_cache_inout); @@ -648,6 +705,8 @@ static void update_registered_path(const std::shared_ptr> &cu curve_trees, tree_extension.c1_layer_extensions, tree_extension.c2_layer_extensions, + start_leaf_tuple_idx, + n_leaf_tuples, bump_ref_count, tree_elem_cach_inout); } @@ -655,9 +714,11 @@ static void update_registered_path(const std::shared_ptr> &cu template static void cache_last_chunk_leaves(const std::shared_ptr> &curve_trees, const typename CurveTrees::Leaves &leaves, + const LeafIdx start_leaf_tuple_idx, + const uint64_t n_leaf_tuples, LeafCache &leaf_cache_inout) { - const uint64_t n_leaf_tuples = leaves.start_leaf_tuple_idx + leaves.tuples.size(); + assert_tuple_slice_is_in_bounds(leaves, start_leaf_tuple_idx, n_leaf_tuples); if (n_leaf_tuples == 0) return; @@ -670,6 +731,8 @@ static void cache_last_chunk_leaves(const std::shared_ptr> &c cache_leaf_chunk(chunk_idx, curve_trees->m_c2_width, leaves, + start_leaf_tuple_idx, + n_leaf_tuples, bump_ref_count, leaf_cache_inout); } @@ -677,9 +740,11 @@ static void cache_last_chunk_leaves(const std::shared_ptr> &c template static void cache_last_chunks(const std::shared_ptr> &curve_trees, const typename CurveTrees::TreeExtension &tree_extension, + const LeafIdx start_leaf_tuple_idx, + const uint64_t n_leaf_tuples, TreeElemCache &tree_elem_cache_inout) { - const uint64_t n_leaf_tuples = tree_extension.leaves.start_leaf_tuple_idx + tree_extension.leaves.tuples.size(); + assert_tuple_slice_is_in_bounds(tree_extension.leaves, start_leaf_tuple_idx, n_leaf_tuples); if (n_leaf_tuples == 0) return; @@ -692,6 +757,8 @@ static void cache_last_chunks(const std::shared_ptr> &curve_t curve_trees, tree_extension.c1_layer_extensions, tree_extension.c2_layer_extensions, + start_leaf_tuple_idx, + n_leaf_tuples, bump_ref_count, tree_elem_cache_inout); } @@ -757,19 +824,57 @@ bool TreeSyncMemory::register_output(const OutputPair &output, const uin // Explicit instantiation template bool TreeSyncMemory::register_output(const OutputPair &output, const 
uint64_t unlock_block_idx); //---------------------------------------------------------------------------------------------------------------------- -// TODO: change all code to be more precise: I should know exactly which tree elems I need. Don't go by what's stored template void TreeSyncMemory::sync_block(const uint64_t block_idx, const crypto::hash &block_hash, const crypto::hash &prev_block_hash, const fcmp_pp::curve_trees::OutputsByUnlockBlock &outs_by_unlock_block) { + typename fcmp_pp::curve_trees::CurveTrees::TreeExtension tree_extension; + std::vector n_new_leaf_tuples_per_block; + + const auto new_block_hashes = {block_hash}; + this->sync_blocks(block_idx == 0 ? 0 : (block_idx - 1), + prev_block_hash, + new_block_hashes, + {outs_by_unlock_block}, + tree_extension, + n_new_leaf_tuples_per_block); + + this->process_synced_blocks(block_idx, new_block_hashes, n_new_leaf_tuples_per_block, tree_extension); +} + +// Explicit instantiation +template void TreeSyncMemory::sync_block(const uint64_t block_idx, + const crypto::hash &block_hash, + const crypto::hash &prev_block_hash, + const fcmp_pp::curve_trees::OutputsByUnlockBlock &outs_by_unlock_block); +//---------------------------------------------------------------------------------------------------------------------- +template +void TreeSyncMemory::sync_blocks(const uint64_t prev_last_block_idx, + const crypto::hash &prev_block_hash, + const std::vector &new_block_hashes, + const std::vector &outs_by_unlock_blocks, + typename fcmp_pp::curve_trees::CurveTrees::TreeExtension &tree_extension_out, + std::vector &n_new_leaf_tuples_per_block_out) +{ + CHECK_AND_ASSERT_THROW_MES(new_block_hashes.size() == outs_by_unlock_blocks.size(), "size mismatch sync_blocks"); + + tree_extension_out = typename fcmp_pp::curve_trees::CurveTrees::TreeExtension{}; + n_new_leaf_tuples_per_block_out.clear(); + + const uint64_t n_new_blocks = (uint64_t) new_block_hashes.size(); + if (n_new_blocks == 0) + return; + // Pre-checks - std::size_t n_leaf_tuples = 0; + uint64_t first_block_idx = 0; + uint64_t n_leaf_tuples = 0; if (m_cached_blocks.empty()) { // TODO: if block_idx > 0, we need the tree's last chunk elems and old_n_leaf_tuples - CHECK_AND_ASSERT_THROW_MES(block_idx == 0, "syncing first block_idx > 0 not yet implemented"); + CHECK_AND_ASSERT_THROW_MES(prev_last_block_idx == 0, "syncing first block_idx > 0 not yet implemented"); + CHECK_AND_ASSERT_THROW_MES(prev_block_hash == crypto::null_hash, "expected null prev last hash"); // Make sure all blockchain containers are empty CHECK_AND_ASSERT_THROW_MES(m_cached_blocks.empty(), "expected empty cached blocks"); @@ -778,108 +883,237 @@ void TreeSyncMemory::sync_block(const uint64_t block_idx, } else { - CHECK_AND_ASSERT_THROW_MES(block_idx > 0, "expected block_idx > 0"); - // Make sure provided block is contiguous to prior synced block const auto &prev_block = m_cached_blocks.back(); - CHECK_AND_ASSERT_THROW_MES(prev_block.blk_idx == (block_idx - 1), "failed contiguity idx check"); + + CHECK_AND_ASSERT_THROW_MES(prev_block.blk_idx == prev_last_block_idx, "failed contiguity idx check"); CHECK_AND_ASSERT_THROW_MES(prev_block.blk_hash == prev_block_hash, "failed contiguity hash check"); + first_block_idx = prev_last_block_idx + 1; n_leaf_tuples = prev_block.n_leaf_tuples; } - // Get the outputs spendable in the next block from the cache. We keep these outputs in the cache in case there's a - // reorg, and trimmed outputs would re-enter the locked outputs cache. 
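Aside on Step 3 of set_valid_leaves above: the win from fe_batch_invert comes from the standard batch-inversion idea (Montgomery's trick), which trades n field inversions for one inversion plus about 3n multiplications. A generic sketch under stated assumptions; F, F{1}, operator* and the free function inv() are placeholders, not the fe API:

    #include <cstddef>
    #include <vector>

    // Batch inversion sketch: all inputs must be nonzero.
    template <typename F>
    std::vector<F> batch_invert(const std::vector<F> &in)
    {
        std::vector<F> prefix(in.size());
        F acc = F{1};
        for (std::size_t i = 0; i < in.size(); ++i)
        {
            prefix[i] = acc;          // product of in[0..i-1]
            acc = acc * in[i];
        }
        F inv_all = inv(acc);         // the only field inversion
        std::vector<F> out(in.size());
        for (std::size_t i = in.size(); i-- > 0; )
        {
            out[i] = inv_all * prefix[i]; // equals 1 / in[i]
            inv_all = inv_all * in[i];    // now inverts product of in[0..i-1]
        }
        return out;
    }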
- auto spendable_outputs = m_locked_outputs[block_idx]; + // Update the locked outputs cache with all outputs set to unlock, and collect unlocked outputs and output id's + // TODO: this approach is doing many unnecessary copies, avoid the copies + TIME_MEASURE_START(getting_unlocked_outputs); + std::vector unlocked_outputs; + std::vector> unlocked_output_ids_by_block; + for (uint64_t i = 0; i < n_new_blocks; ++i) + { + const BlockIdx blk_idx = first_block_idx + i; + + m_output_count += add_to_locked_outputs_cache(outs_by_unlock_blocks[i], + blk_idx, + m_locked_outputs, + m_locked_output_refs + ); + + const auto &unlocked_outputs_in_blk = m_locked_outputs[blk_idx]; + unlocked_outputs.insert( + unlocked_outputs.end(), + unlocked_outputs_in_blk.begin(), + unlocked_outputs_in_blk.end() + ); + + // Collect unlock output id's by block + std::vector unlocked_output_ids; + unlocked_output_ids.reserve(unlocked_outputs_in_blk.size()); + for (const auto &unlocked_output : unlocked_outputs_in_blk) + unlocked_output_ids.push_back(unlocked_output.output_id); + unlocked_output_ids_by_block.emplace_back(std::move(unlocked_output_ids)); + } + TIME_MEASURE_FINISH(getting_unlocked_outputs); + TIME_MEASURE_START(getting_tree_extension); // Get the tree extension using existing tree data. We'll use the tree extension to update registered output paths // in the tree and cache the data necessary to either build the next block's tree extension or pop the block. - const auto tree_extension = TreeSync::m_curve_trees->get_tree_extension( + const std::size_t n_unlocked_outputs = unlocked_outputs.size(); + tree_extension_out = TreeSync::m_curve_trees->get_tree_extension( n_leaf_tuples, this->get_last_hashes(n_leaf_tuples), - std::move(spendable_outputs)); + std::move(unlocked_outputs)); - // Now that we've grown the tree with outputs that unlock this block, we add the outputs from this block to the - // locked outputs cache so that they'll be used to grow the tree when they unlock. - m_output_count += add_to_locked_outputs_cache(outs_by_unlock_block, - block_idx, - m_locked_outputs, - m_locked_output_refs); + CHECK_AND_ASSERT_THROW_MES(n_unlocked_outputs >= tree_extension_out.leaves.tuples.size(), "unexpected new n tuples"); - // Update the existing last hashes in the cache using the tree extension - update_existing_last_hashes(TreeSync::m_curve_trees, tree_extension, m_tree_elem_cache); + TIME_MEASURE_FINISH(getting_tree_extension); - // Check if any registered outputs are present in the tree extension. 
If so, we assign the output its leaf idx and - // start keeping track of the output's path elems - for (uint64_t i = 0; i < tree_extension.leaves.tuples.size(); ++i) + // Read the tree extension and determine n leaf tuples added per block + n_new_leaf_tuples_per_block_out.reserve(n_new_blocks); + auto new_leaf_tuple_it = tree_extension_out.leaves.tuples.begin(); + for (uint64_t i = 0; i < n_new_blocks; ++i) { - const auto &output_pair = tree_extension.leaves.tuples[i].output_pair; - const LeafIdx leaf_idx = tree_extension.leaves.start_leaf_tuple_idx + i; - assign_new_output(output_pair, leaf_idx, m_registered_outputs); + uint64_t n_leaf_tuples_in_block = 0; + + const auto &unlocked_output_ids = unlocked_output_ids_by_block[i]; + for (const uint64_t output_id : unlocked_output_ids) + { + uint64_t next_included_output_id = new_leaf_tuple_it->output_id; + CHECK_AND_ASSERT_THROW_MES(next_included_output_id >= output_id, "unexpected high output_id"); + if (output_id == next_included_output_id) + { + ++n_leaf_tuples_in_block; + ++new_leaf_tuple_it; + } + } + + n_new_leaf_tuples_per_block_out.push_back(n_leaf_tuples_in_block); } - // Cache tree elems from the tree extension needed in order to keep track of registered output paths in the tree - for (const auto ®istered_o : m_registered_outputs) + m_getting_unlocked_outs_ms += getting_unlocked_outputs; + m_getting_tree_extension_ms += getting_tree_extension; + + LOG_PRINT_L1("Total time getting unlocked outs: " << m_getting_unlocked_outs_ms / 1000 + << " , getting tree extension: " << m_getting_tree_extension_ms / 1000); +} + +// Explicit instantiation +template void TreeSyncMemory::sync_blocks(const uint64_t prev_last_block_idx, + const crypto::hash &prev_block_hash, + const std::vector &new_block_hashes, + const std::vector &outs_by_unlock_blocks, + typename fcmp_pp::curve_trees::CurveTrees::TreeExtension &tree_extension_out, + std::vector &n_new_leaf_tuples_per_block_out); +//---------------------------------------------------------------------------------------------------------------------- +template +void TreeSyncMemory::process_synced_blocks(const uint64_t n_blocks_already_synced, + const std::vector &new_block_hashes, + const std::vector &n_new_leaf_tuples_per_block, + const typename fcmp_pp::curve_trees::CurveTrees::TreeExtension &tree_extension) +{ + // Pre-checks + CHECK_AND_ASSERT_THROW_MES(new_block_hashes.size() == n_new_leaf_tuples_per_block.size(), + "size mismatch process synced blocks"); + + uint64_t n_leaf_tuples = 0; + if (m_cached_blocks.empty()) { - // Skip all registered outputs which have not been included in the tree yet - if (!registered_o.second.assigned_leaf_idx) - continue; + // TODO: if block_idx > 0, we need the tree's last chunk elems and old_n_leaf_tuples + CHECK_AND_ASSERT_THROW_MES(n_blocks_already_synced == 0, "syncing first block_idx > 0 not yet implemented"); + + // Make sure all blockchain containers are empty + CHECK_AND_ASSERT_THROW_MES(m_cached_blocks.empty(), "expected empty cached blocks"); + CHECK_AND_ASSERT_THROW_MES(m_leaf_cache.empty(), "expected empty cached leaves"); + CHECK_AND_ASSERT_THROW_MES(m_tree_elem_cache.empty(), "expected empty cached tree elems"); + } + else + { + CHECK_AND_ASSERT_THROW_MES(n_blocks_already_synced > 0, "expected n_blocks_already_synced > 0"); + + // Make sure provided block is contiguous to prior synced block + const auto &prev_block = m_cached_blocks.back(); + CHECK_AND_ASSERT_THROW_MES((prev_block.blk_idx + 1) == n_blocks_already_synced, "failed contiguity idx check"); 
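A standalone sketch of the per-block attribution step in sync_blocks above: the tree extension's tuples are sorted by output_id and are a subset of the unlocked outputs (invalid outputs are dropped), so a single forward scan recovers how many leaves each block contributed. It assumes, as that loop does, that concatenating the per-block id lists matches the sorted tuple order; all names here are illustrative:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<uint64_t> count_included_per_block(
        const std::vector<std::vector<uint64_t>> &unlocked_ids_by_block,
        const std::vector<uint64_t> &included_ids)
    {
        std::vector<uint64_t> counts;
        counts.reserve(unlocked_ids_by_block.size());
        std::size_t k = 0; // cursor into the sorted included ids
        for (const auto &blk_ids : unlocked_ids_by_block)
        {
            uint64_t n = 0;
            for (const uint64_t id : blk_ids)
            {
                if (k < included_ids.size() && included_ids[k] == id)
                {
                    ++n;
                    ++k;
                }
            }
            counts.push_back(n);
        }
        return counts;
    }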
+ + n_leaf_tuples = prev_block.n_leaf_tuples; + } + + // Update the existing last hashes in the cache using the tree extension + update_existing_last_hashes(TreeSync::m_curve_trees, tree_extension, m_tree_elem_cache); + + // Go block-by-block using slices of the tree extension to update values in the cache + uint64_t tuple_idx_start_slice = 0; + for (std::size_t i = 0; i < new_block_hashes.size(); ++i) + { + const uint64_t n_new_leaf_tuples = n_new_leaf_tuples_per_block[i]; + n_leaf_tuples += n_new_leaf_tuples; + + const LeafIdx start_leaf_tuple_idx = tree_extension.leaves.start_leaf_tuple_idx + tuple_idx_start_slice; + + // Check if any registered outputs are present in the tree extension. If so, we assign the output its leaf idx + // and start keeping track of the output's path elems + for (uint64_t i = 0; i < n_new_leaf_tuples; ++i) + { + const LeafIdx tuple_idx = tuple_idx_start_slice + i; + CHECK_AND_ASSERT_THROW_MES(tree_extension.leaves.tuples.size() > tuple_idx, "unexpected tuple_idx"); + + const auto &output_pair = tree_extension.leaves.tuples[tuple_idx].output_pair; + const LeafIdx leaf_idx = start_leaf_tuple_idx + i; + assign_new_output(output_pair, leaf_idx, m_registered_outputs); + } + tuple_idx_start_slice += n_new_leaf_tuples; + + // Cache tree elems from the tree extension needed in order to keep track of registered output paths in the tree + for (const auto ®istered_o : m_registered_outputs) + { + // Skip all registered outputs which have not been included in the tree yet + if (!registered_o.second.assigned_leaf_idx) + continue; + + update_registered_path(TreeSync::m_curve_trees, + registered_o.second.leaf_idx, + tree_extension, + start_leaf_tuple_idx, + n_leaf_tuples, + m_leaf_cache, + m_tree_elem_cache); + } - update_registered_path(TreeSync::m_curve_trees, - registered_o.second.leaf_idx, + // Cache the last chunk of leaves, so if a registered output appears in the first chunk next block, we'll have + // all prior leaves from that output's chunk already saved + cache_last_chunk_leaves(TreeSync::m_curve_trees, + tree_extension.leaves, + start_leaf_tuple_idx, + n_leaf_tuples, + m_leaf_cache); + + // Cache the last chunk of hashes from every layer. We need to do this to handle all of the following: + // 1) So we can use the tree's last hashes to grow the tree from here next block. + // 2) In case a registered output appears in the first chunk next block, we'll have all its path elems cached. + // 3) To trim the tree on reorg by re-growing with the children in each last chunk. + cache_last_chunks(TreeSync::m_curve_trees, tree_extension, - m_leaf_cache, + start_leaf_tuple_idx, + n_leaf_tuples, m_tree_elem_cache); - } - // Cache the last chunk of leaves, so if a registered output appears in the first chunk next block, we'll have all - // prior leaves from that output's chunk already saved - cache_last_chunk_leaves(TreeSync::m_curve_trees, tree_extension.leaves, m_leaf_cache); - - // Cache the last chunk of hashes from every layer. We need to do this to handle all of the following: - // 1) So we can use the tree's last hashes to grow the tree from here next block. - // 2) In case a registered output appears in the first chunk next block, we'll have all its path elems cached. - // 3) To trim the tree on reorg by re-growing with the children in each last chunk. 
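For orientation, a rough caller-side sketch of how the two phases chain together, mirroring the sync_block wrapper above. The Helios/Selene template arguments and the wrapper itself are assumptions for illustration, not code from this patch; sync_blocks only reads cached state and does the heavy hashing, process_synced_blocks then commits the result:

    using CurveTreesV1 = fcmp_pp::curve_trees::CurveTrees<fcmp_pp::curve_trees::Helios,
                                                          fcmp_pp::curve_trees::Selene>;

    void sync_span(fcmp_pp::curve_trees::TreeSyncMemory<fcmp_pp::curve_trees::Helios,
            fcmp_pp::curve_trees::Selene> &tree_sync,
        const uint64_t first_new_block_idx,
        const crypto::hash &prev_block_hash, // null hash when starting from block 0
        const std::vector<crypto::hash> &new_block_hashes,
        const std::vector<fcmp_pp::curve_trees::OutputsByUnlockBlock> &outs_by_unlock_blocks)
    {
        CurveTreesV1::TreeExtension tree_extension;
        std::vector<uint64_t> n_new_leaf_tuples_per_block;

        // Phase 1: build the extension for the whole span in one shot
        tree_sync.sync_blocks(first_new_block_idx == 0 ? 0 : (first_new_block_idx - 1),
            prev_block_hash, new_block_hashes, outs_by_unlock_blocks,
            tree_extension, n_new_leaf_tuples_per_block);

        // Phase 2: slice it per block to update caches and block metadata
        tree_sync.process_synced_blocks(first_new_block_idx, new_block_hashes,
            n_new_leaf_tuples_per_block, tree_extension);
    }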
- cache_last_chunks(TreeSync::m_curve_trees, tree_extension, m_tree_elem_cache); - - // Update cached blocks - const uint64_t new_total_n_leaf_tuples = n_leaf_tuples + tree_extension.leaves.tuples.size(); - auto blk_meta = BlockMeta { - .blk_idx = block_idx, - .blk_hash = block_hash, - .n_leaf_tuples = new_total_n_leaf_tuples, + // Enqueue block meta + const BlockIdx blk_idx = n_blocks_already_synced + i; + const auto &blk_hash = new_block_hashes[i]; + auto blk_meta = BlockMeta { + .blk_idx = blk_idx, + .blk_hash = blk_hash, + .n_leaf_tuples = n_leaf_tuples, }; - m_cached_blocks.push_back(std::move(blk_meta)); + m_cached_blocks.push_back(std::move(blk_meta)); - // Deque the oldest cached block upon reaching the max reorg depth - if ((uint64_t)m_cached_blocks.size() > TreeSync::m_max_reorg_depth) - { - CHECK_AND_ASSERT_THROW_MES(!m_cached_blocks.empty(), "empty cached blocks"); - const auto &oldest_block = m_cached_blocks.front(); + // Deque the oldest cached block upon reaching the max reorg depth + if ((uint64_t)m_cached_blocks.size() > TreeSync::m_max_reorg_depth) + { + CHECK_AND_ASSERT_THROW_MES(!m_cached_blocks.empty(), "empty cached blocks"); + const auto &oldest_block = m_cached_blocks.front(); - // All locked outputs that unlocked in the oldest block idx should already be in the tree. We keep them cached - // to handle reorgs (in case an output trimmed from the tree is supposed to re-enter the cache). We don't need - // to keep them past the reorg depth. - m_locked_outputs.erase(/*UnlockBlockIdx*/oldest_block.blk_idx); + // All locked outputs that unlocked in the oldest block idx should already be in the tree. We keep them cached + // to handle reorgs (in case an output trimmed from the tree is supposed to re-enter the cache). We don't need + // to keep them past the reorg depth. + m_locked_outputs.erase(/*UnlockBlockIdx*/oldest_block.blk_idx); - // We keep locked output refs around for outputs *created* in the oldest block, so we can quickly remove them - // from the locked outputs cache upon popping the block. Once the reorg depth is exceeded, we can't remove those - // outputs anyway, so remove from the cache. - m_locked_output_refs.erase(/*CreatedBlockIdx*/oldest_block.blk_idx); + // We keep locked output refs around for outputs *created* in the oldest block, so we can quickly remove them + // from the locked outputs cache upon popping the block. Once the reorg depth is exceeded, we can't remove those + // outputs anyway, so remove from the cache. 
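A simplified standalone sketch of the bounded reorg window maintained here: block metadata is pushed on the back of the deque, and once the window exceeds the max reorg depth the oldest entry is pruned along with the caches keyed by its block index. Only the push/prune shape is shown; the container types are stand-ins:

    #include <cstddef>
    #include <cstdint>
    #include <deque>
    #include <unordered_map>
    #include <vector>

    struct BlockMetaSketch { uint64_t blk_idx; uint64_t n_leaf_tuples; };

    // Past the reorg depth a block can no longer be popped, so the locked-output
    // data kept to handle its reorg is no longer needed and is dropped with it.
    void push_block_bounded(std::deque<BlockMetaSketch> &cached_blocks,
        std::unordered_map<uint64_t, std::vector<uint64_t>> &locked_outputs_by_unlock_blk,
        std::unordered_map<uint64_t, std::vector<uint64_t>> &locked_output_refs_by_created_blk,
        BlockMetaSketch &&blk_meta,
        const std::size_t max_reorg_depth)
    {
        cached_blocks.push_back(std::move(blk_meta));
        while (cached_blocks.size() > max_reorg_depth)
        {
            const uint64_t oldest_blk_idx = cached_blocks.front().blk_idx;
            locked_outputs_by_unlock_blk.erase(oldest_blk_idx);
            locked_output_refs_by_created_blk.erase(oldest_blk_idx);
            cached_blocks.pop_front();
        }
    }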
+ m_locked_output_refs.erase(/*CreatedBlockIdx*/oldest_block.blk_idx); - this->deque_block(oldest_block); - m_cached_blocks.pop_front(); + this->deque_block(oldest_block); + m_cached_blocks.pop_front(); + } } + CHECK_AND_ASSERT_THROW_MES(tuple_idx_start_slice == tree_extension.leaves.tuples.size(), + "did not account for all new leaf tuples"); if ((uint64_t)m_cached_blocks.size() > TreeSync::m_max_reorg_depth) LOG_ERROR("Cached blocks exceeded max reorg depth"); + + const uint64_t blk_idx = m_cached_blocks.back().blk_idx; + const auto &blk_hash = m_cached_blocks.back().blk_hash; + if (n_leaf_tuples > 0 && (blk_idx % 1000) == 0) + { + const std::string tree_root = epee::string_tools::pod_to_hex(this->get_tree_root()); + MINFO("Block idx: " << blk_idx << ", block: " << blk_hash << ", tree root: " << tree_root << ", leaves: " << n_leaf_tuples); + } } -// Explicit instantiation -template void TreeSyncMemory::sync_block(const uint64_t block_idx, - const crypto::hash &block_hash, - const crypto::hash &prev_block_hash, - const fcmp_pp::curve_trees::OutputsByUnlockBlock &outs_by_unlock_block); +template void TreeSyncMemory::process_synced_blocks(const uint64_t n_blocks_already_synced, + const std::vector &new_block_hashes, + const std::vector &n_new_leaf_tuples_per_block, + const typename fcmp_pp::curve_trees::CurveTrees::TreeExtension &tree_extension); //---------------------------------------------------------------------------------------------------------------------- template bool TreeSyncMemory::pop_block() @@ -1058,6 +1292,43 @@ template bool TreeSyncMemory::get_output_path(const OutputPair & //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- template +std::array TreeSyncMemory::get_tree_root() const +{ + CHECK_AND_ASSERT_THROW_MES(!m_cached_blocks.empty(), "empty cache"); + + const uint64_t n_leaf_tuples = m_cached_blocks.back().n_leaf_tuples; + CHECK_AND_ASSERT_THROW_MES(n_leaf_tuples > 0, "empty tree"); + + const std::size_t n_layers = TreeSync::m_curve_trees->n_layers(n_leaf_tuples); + CHECK_AND_ASSERT_THROW_MES(n_layers > 0, "n_layers must be > 0"); + + const LayerIdx root_layer_idx = n_layers - 1; + + const auto root_layer_it = m_tree_elem_cache.find(root_layer_idx); + CHECK_AND_ASSERT_THROW_MES(root_layer_it != m_tree_elem_cache.end(), "did not find root layer"); + + const auto root_chunk_it = root_layer_it->second.find(0); + CHECK_AND_ASSERT_THROW_MES(root_chunk_it != root_layer_it->second.end(), "did not find root chunk"); + + CHECK_AND_ASSERT_THROW_MES(root_chunk_it->second.tree_elems.size() == 1, "unexpected size of root layer chunk"); + + return root_chunk_it->second.tree_elems.front(); +} + +// Explicit instantiation +template std::array TreeSyncMemory::get_tree_root() const; +//---------------------------------------------------------------------------------------------------------------------- +template +uint64_t TreeSyncMemory::get_n_leaf_tuples() const +{ + CHECK_AND_ASSERT_THROW_MES(!m_cached_blocks.empty(), "empty cache"); + return m_cached_blocks.back().n_leaf_tuples; +} + +// Explicit instantiation +template uint64_t TreeSyncMemory::get_n_leaf_tuples() const; +//---------------------------------------------------------------------------------------------------------------------- +template void TreeSyncMemory::clear() { m_locked_outputs.clear(); diff --git 
a/src/fcmp_pp/tree_sync_memory.h b/src/fcmp_pp/tree_sync_memory.h index 5345dff2c35..0e2aafe159e 100644 --- a/src/fcmp_pp/tree_sync_memory.h +++ b/src/fcmp_pp/tree_sync_memory.h @@ -194,8 +194,24 @@ class TreeSyncMemory final : public TreeSync // Public functions not part of TreeSync interface public: + // TODO: make this part of the TreeSync interface + std::array get_tree_root() const; + uint64_t get_n_leaf_tuples() const; + uint64_t get_output_count() const { return m_output_count; } + void sync_blocks(const uint64_t prev_last_block_idx, + const crypto::hash &prev_block_hash, + const std::vector &new_block_hashes, + const std::vector &outs_by_unlock_blocks, + typename fcmp_pp::curve_trees::CurveTrees::TreeExtension &tree_extension_out, + std::vector &n_new_leaf_tuples_per_block_out); + + void process_synced_blocks(const uint64_t n_blocks_already_synced, + const std::vector &new_block_hashes, + const std::vector &n_new_leaf_tuples_per_block, + const typename fcmp_pp::curve_trees::CurveTrees::TreeExtension &tree_extension); + // Clear all state void clear(); @@ -232,6 +248,10 @@ class TreeSyncMemory final : public TreeSync // the tree extensions and reductions for each block correctly locally when syncing. std::deque m_cached_blocks; + uint64_t m_getting_unlocked_outs_ms; + uint64_t m_getting_tree_extension_ms; + uint64_t m_updating_cache_values_ms; + // Serialization public: template diff --git a/src/wallet/CMakeLists.txt b/src/wallet/CMakeLists.txt index 83ae2a37156..cb028f16d26 100644 --- a/src/wallet/CMakeLists.txt +++ b/src/wallet/CMakeLists.txt @@ -48,6 +48,7 @@ monero_add_library(wallet ${wallet_private_headers}) target_link_libraries(wallet PUBLIC + blockchain_db_utils rpc_base multisig common diff --git a/src/wallet/wallet2.cpp b/src/wallet/wallet2.cpp index 990c47112ad..dd99ba2d5a7 100644 --- a/src/wallet/wallet2.cpp +++ b/src/wallet/wallet2.cpp @@ -49,6 +49,7 @@ #include "include_base_utils.h" using namespace epee; +#include "blockchain_db/blockchain_db_utils.h" #include "cryptonote_config.h" #include "hardforks/hardforks.h" #include "cryptonote_core/tx_sanity_check.h" @@ -1022,6 +1023,15 @@ crypto::chacha_key derive_cache_key(const crypto::chacha_key& keys_data_key, con return cache_key; } + +void sync_genesis_block(const crypto::hash &genesis_hash, + const cryptonote::transaction &genesis_tx, + fcmp_pp::curve_trees::TreeSyncMemory &tree_sync_inout) +{ + tree_sync_inout.clear(); + const auto outs_by_unlock_block = cryptonote::get_outs_by_unlock_block(genesis_tx, {}, 0, 0).first; + tree_sync_inout.sync_block(0, genesis_hash, crypto::hash{}, outs_by_unlock_block); +} //----------------------------------------------------------------- } //namespace @@ -2275,7 +2285,7 @@ bool wallet2::spends_one_of_ours(const cryptonote::transaction &tx) const //---------------------------------------------------------------------------------------------------- void wallet2::process_new_transaction(const crypto::hash &txid, const cryptonote::transaction& tx, const std::vector &o_indices, uint64_t height, uint8_t block_version, uint64_t ts, bool miner_tx, bool pool, bool double_spend_seen, const tx_cache_data &tx_cache_data, std::map, size_t> *output_tracker_cache, bool ignore_callbacks) { - PERF_TIMER(process_new_transaction); + // PERF_TIMER(process_new_transaction); // In this function, tx (probably) only contains the base information // (that is, the prunable stuff may or may not be included) if (!miner_tx && !pool) @@ -2470,11 +2480,18 @@ void wallet2::process_new_transaction(const 
@@ -2470,11 +2480,18 @@ void wallet2::process_new_transaction(const crypto::hash &txid, const cryptonote
     THROW_WALLET_EXCEPTION_IF(tx.vout.size() <= o, error::wallet_internal_error, "wrong out in transaction: internal index=" +
       std::to_string(o) + ", total_outs=" + std::to_string(tx.vout.size()));
 
+    THROW_WALLET_EXCEPTION_IF(tx.vout[o].amount == 0 && tx.rct_signatures.outPk.size() <= o,
+      error::wallet_internal_error, "incorrect size of rct signatures");
+
     auto kit = m_pub_keys.find(tx_scan_info[o].in_ephemeral.pub);
     THROW_WALLET_EXCEPTION_IF(kit != m_pub_keys.end() && kit->second >= m_transfers.size(),
         error::wallet_internal_error, std::string("Unexpected transfer index from public key: ")
         + "got " + (kit == m_pub_keys.end() ? "" : boost::lexical_cast<std::string>(kit->second))
         + ", m_transfers.size() is " + boost::lexical_cast<std::string>(m_transfers.size()));
+    const fcmp_pp::curve_trees::OutputPair output_pair{
+        .output_pubkey = tx_scan_info[o].in_ephemeral.pub,
+        .commitment = tx.vout[o].amount == 0 ? tx.rct_signatures.outPk[o].mask : rct::zeroCommitVartime(tx.vout[o].amount)
+      };
     if (kit == m_pub_keys.end())
     {
       uint64_t amount = tx.vout[o].amount ? tx.vout[o].amount : tx_scan_info[o].amount;
@@ -2546,6 +2563,7 @@ void wallet2::process_new_transaction(const crypto::hash &txid, const cryptonote
           LOG_PRINT_L0("Received money: " << print_money(td.amount()) << ", with tx: " << txid);
           if (!ignore_callbacks && 0 != m_callback)
             m_callback->on_money_received(height, txid, tx, td.m_amount, 0, td.m_subaddr_index, spends_one_of_ours(tx), td.m_tx.unlock_time);
+          m_tree_sync.register_output(output_pair, cryptonote::get_unlock_block_index(tx.unlock_time, height));
         }
         total_received_1 += amount;
         notify = true;
@@ -2624,6 +2642,7 @@ void wallet2::process_new_transaction(const crypto::hash &txid, const cryptonote
           LOG_PRINT_L0("Received money: " << print_money(td.amount()) << ", with tx: " << txid);
           if (!ignore_callbacks && 0 != m_callback)
             m_callback->on_money_received(height, txid, tx, td.m_amount, burnt, td.m_subaddr_index, spends_one_of_ours(tx), td.m_tx.unlock_time);
+          m_tree_sync.register_output(output_pair, cryptonote::get_unlock_block_index(tx.unlock_time, height));
         }
         total_received_1 += extra_amount;
         notify = true;
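The output_pair construction above picks the commitment differently for RingCT and pre-RingCT outputs: a RingCT output carries its commitment in the transaction's outPk, while a cleartext-amount output has its commitment recomputed deterministically from the amount via zeroCommitVartime. A sketch of that selection as a standalone helper (out_commitment is an illustrative name, not a function in this patch; assumes ringct/rctOps.h and cryptonote_basic headers):

    // Illustrative helper: how the commitment for a received output is chosen.
    // The caller must have already verified outPk.size() > o for RCT outputs
    // (the THROW_WALLET_EXCEPTION_IF added above does exactly that).
    static rct::key out_commitment(const cryptonote::transaction &tx, const std::size_t o)
    {
        return tx.vout[o].amount == 0
            ? tx.rct_signatures.outPk[o].mask            // RingCT: commitment carried in the tx
            : rct::zeroCommitVartime(tx.vout[o].amount); // pre-RingCT: recompute from the cleartext amount
    }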
build_tree_time << ")ms"); }else { if (!(height % 128)) @@ -3132,7 +3162,7 @@ void wallet2::pull_blocks(bool first, bool try_incremental, uint64_t start_heigh req.prune = true; req.start_height = start_height; - req.no_miner_tx = m_refresh_type == RefreshNoCoinbase; + req.no_miner_tx = false; // always need the miner tx so we can grow the tree correctly req.requested_info = (first && !m_background_syncing) ? COMMAND_RPC_GET_BLOCKS_FAST::BLOCKS_AND_POOL : COMMAND_RPC_GET_BLOCKS_FAST::BLOCKS_ONLY; if (try_incremental && !m_background_syncing) @@ -3214,6 +3244,94 @@ void wallet2::process_parsed_blocks(const uint64_t start_height, const std::vect if (has_prev_block) { prev_block_id = m_blockchain[start_height - 1]; } + + // Use outputs to build the curve tree in a separate thread + tools::threadpool::waiter tree_sync_waiter(tpool); + const uint64_t n_blocks_already_synced = (uint64_t) m_blockchain.size(); + THROW_WALLET_EXCEPTION_IF(n_blocks_already_synced == 0, error::wallet_internal_error, "n_blocks_already_synced == 0"); + + fcmp_pp::curve_trees::CurveTreesV1::TreeExtension tree_extension; + std::vector new_leaf_tuples_per_block; + std::vector new_block_hashes; + + tpool.submit(&tree_sync_waiter, [this, &parsed_blocks, &tree_extension, &new_leaf_tuples_per_block, &new_block_hashes, start_height, n_blocks_already_synced]() { + TIME_MEASURE_START(sync_blocks_time); + + const uint64_t n_downloaded_blocks = (uint64_t) parsed_blocks.size(); + if (n_downloaded_blocks == 0) + return; + + const uint64_t last_block_idx = start_height + n_downloaded_blocks - 1; + THROW_WALLET_EXCEPTION_IF(n_blocks_already_synced > last_block_idx, error::wallet_internal_error, + "already synced block idx"); + + const uint64_t n_new_blocks = (last_block_idx + 1) - n_blocks_already_synced; + MDEBUG("last_block_idx: " << last_block_idx + << ", start_height: " << start_height + << ", n_downloaded_blocks: " << n_downloaded_blocks + << ", n_blocks_already_synced: " << n_blocks_already_synced + << ", n_new_blocks: " << n_new_blocks); + + // Contiguity check on already synced blocks + THROW_WALLET_EXCEPTION_IF(n_downloaded_blocks >= n_new_blocks, error::wallet_internal_error, + "n_downloaded_blocks must be >= n_new_blocks"); + const uint64_t next_block_idx = n_downloaded_blocks - n_new_blocks; + + // TODO: Check contiguity and handle reorgs if not contigous + uint64_t prev_last_block_idx = n_blocks_already_synced - 1; + crypto::hash prev_block_hash = parsed_blocks[next_block_idx].block.prev_id; + + // We only need outputs from blocks contiguous to the already synced height + + if (n_new_blocks == 0) + return; + + // Collect all outs from all blocks by unlock block + TIME_MEASURE_START(collecting_outs_by_unlock_block); + std::vector outs_by_unlock_blocks; + + new_block_hashes.reserve(n_new_blocks); + outs_by_unlock_blocks.reserve(n_new_blocks); + + uint64_t first_output_id = m_tree_sync.get_output_count(); + for (size_t i = next_block_idx; i < n_new_blocks; ++i) + { + new_block_hashes.push_back(parsed_blocks[i].block.hash); + + const auto &miner_tx = std::ref(parsed_blocks[i].block.miner_tx); + std::vector> txs; + txs.reserve(parsed_blocks[i].txes.size()); + for (size_t j = 0; j < parsed_blocks[i].txes.size(); ++j) + txs.push_back(std::ref(parsed_blocks[i].txes[j])); + + // Note: this function is slow because of zeroCommitVartime + auto res = cryptonote::get_outs_by_unlock_block(miner_tx, txs, first_output_id, prev_last_block_idx); + + outs_by_unlock_blocks.emplace_back(std::move(res.first)); + first_output_id = res.second + 
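The next hunk offloads tree building to the wallet's threadpool and only joins it after output scanning finishes. Reduced to its skeleton, the pattern looks like the following (assuming tools::threadpool from src/common/threadpool.h, whose submit/waiter interface the patch uses; the lambda body is a placeholder):

    #include <stdexcept>
    #include "common/threadpool.h"

    tools::threadpool &tpool = tools::threadpool::getInstance();
    tools::threadpool::waiter waiter(tpool);

    // Kick off background work; anything captured by reference must outlive wait() below.
    tpool.submit(&waiter, [/* &captured state */]() {
        // ... build the tree extension here ...
    });

    // ... meanwhile, do independent work on this thread (e.g. scan outputs) ...

    // Join: wait() returns false if a submitted job threw.
    if (!waiter.wait())
        throw std::runtime_error("Exception in thread pool");

This works because the compute phase (sync_blocks) only reads immutable inputs and writes to its own output variables, so it can safely overlap the scanning loop.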
@@ -3214,6 +3244,94 @@ void wallet2::process_parsed_blocks(const uint64_t start_height, const std::vect
   if (has_prev_block) {
     prev_block_id = m_blockchain[start_height - 1];
   }
+
+  // Use outputs to build the curve tree in a separate thread
+  tools::threadpool::waiter tree_sync_waiter(tpool);
+  const uint64_t n_blocks_already_synced = (uint64_t) m_blockchain.size();
+  THROW_WALLET_EXCEPTION_IF(n_blocks_already_synced == 0, error::wallet_internal_error, "n_blocks_already_synced == 0");
+
+  fcmp_pp::curve_trees::CurveTreesV1::TreeExtension tree_extension;
+  std::vector<uint64_t> new_leaf_tuples_per_block;
+  std::vector<crypto::hash> new_block_hashes;
+
+  tpool.submit(&tree_sync_waiter, [this, &parsed_blocks, &tree_extension, &new_leaf_tuples_per_block, &new_block_hashes, start_height, n_blocks_already_synced]() {
+    TIME_MEASURE_START(sync_blocks_time);
+
+    const uint64_t n_downloaded_blocks = (uint64_t) parsed_blocks.size();
+    if (n_downloaded_blocks == 0)
+      return;
+
+    const uint64_t last_block_idx = start_height + n_downloaded_blocks - 1;
+    THROW_WALLET_EXCEPTION_IF(n_blocks_already_synced > last_block_idx, error::wallet_internal_error,
+      "already synced block idx");
+
+    const uint64_t n_new_blocks = (last_block_idx + 1) - n_blocks_already_synced;
+    MDEBUG("last_block_idx: " << last_block_idx
+      << ", start_height: " << start_height
+      << ", n_downloaded_blocks: " << n_downloaded_blocks
+      << ", n_blocks_already_synced: " << n_blocks_already_synced
+      << ", n_new_blocks: " << n_new_blocks);
+
+    // Contiguity check on already synced blocks
+    THROW_WALLET_EXCEPTION_IF(n_downloaded_blocks < n_new_blocks, error::wallet_internal_error,
+      "n_downloaded_blocks must be >= n_new_blocks");
+    const uint64_t next_block_idx = n_downloaded_blocks - n_new_blocks;
+
+    // TODO: Check contiguity and handle reorgs if not contiguous
+    uint64_t prev_last_block_idx = n_blocks_already_synced - 1;
+    crypto::hash prev_block_hash = parsed_blocks[next_block_idx].block.prev_id;
+
+    // We only need outputs from blocks contiguous to the already synced height
+
+    if (n_new_blocks == 0)
+      return;
+
+    // Collect all outs from all blocks by unlock block
+    TIME_MEASURE_START(collecting_outs_by_unlock_block);
+    std::vector<fcmp_pp::curve_trees::OutputsByUnlockBlock> outs_by_unlock_blocks;
+
+    new_block_hashes.reserve(n_new_blocks);
+    outs_by_unlock_blocks.reserve(n_new_blocks);
+
+    uint64_t first_output_id = m_tree_sync.get_output_count();
+    for (size_t i = next_block_idx; i < n_downloaded_blocks; ++i)
+    {
+      new_block_hashes.push_back(parsed_blocks[i].block.hash);
+
+      const auto &miner_tx = std::ref(parsed_blocks[i].block.miner_tx);
+      std::vector<std::reference_wrapper<const cryptonote::transaction>> txs;
+      txs.reserve(parsed_blocks[i].txes.size());
+      for (size_t j = 0; j < parsed_blocks[i].txes.size(); ++j)
+        txs.push_back(std::ref(parsed_blocks[i].txes[j]));
+
+      // Note: this function is slow because of zeroCommitVartime
+      auto res = cryptonote::get_outs_by_unlock_block(miner_tx, txs, first_output_id, prev_last_block_idx);
+
+      outs_by_unlock_blocks.emplace_back(std::move(res.first));
+      first_output_id = res.second;
+    }
+
+    TIME_MEASURE_FINISH(collecting_outs_by_unlock_block);
+
+    // Get a tree extension with the outputs that will unlock in this chunk of blocks
+    m_tree_sync.sync_blocks(prev_last_block_idx,
+      prev_block_hash,
+      new_block_hashes,
+      outs_by_unlock_blocks,
+      tree_extension,
+      new_leaf_tuples_per_block);
+
+    TIME_MEASURE_FINISH(sync_blocks_time);
+
+    m_outs_by_unlock_time_ms += collecting_outs_by_unlock_block;
+    m_sync_blocks_time_ms += sync_blocks_time;
+
+    LOG_PRINT_L1("Total time spent building tree: " << m_sync_blocks_time_ms / 1000
+      << " s, time spent collecting outs by unlock time while building tree: " << m_outs_by_unlock_time_ms / 1000 << " s");
+  });
+
+  // After identifying received outputs in parallel with the rest of this function,
+  // we process the synced blocks at the end of this function.
+
   for (size_t i = 0; i < blocks.size(); ++i)
   {
     if (has_prev_block)
     {
@@ -3399,6 +3517,15 @@ void wallet2::process_parsed_blocks(const uint64_t start_height, const std::vect
     ++current_index;
     tx_cache_data_offset += 1 + parsed_blocks[i].txes.size();
   }
+
+  // Now that we've processed all received outputs, call process_synced_blocks
+  // to save the elems we need from the tree, and get rid of the rest of the
+  // tree elems we no longer need.
+  LOG_PRINT_L1("Waiting on tree sync");
+  THROW_WALLET_EXCEPTION_IF(!tree_sync_waiter.wait(), error::wallet_internal_error, "Exception in thread pool");
+  LOG_PRINT_L1("Done waiting on tree sync");
+
+  // TODO: make sure top block hash matches top of tree extension
+  m_tree_sync.process_synced_blocks(n_blocks_already_synced, new_block_hashes, new_leaf_tuples_per_block, tree_extension);
 }
 //----------------------------------------------------------------------------------------------------
 void wallet2::refresh(bool trusted_daemon)
@@ -4113,7 +4240,9 @@ void wallet2::refresh(bool trusted_daemon, uint64_t start_height, uint64_t & blo
     cryptonote::block b;
     generate_genesis(b);
     m_blockchain.clear();
-    m_blockchain.push_back(get_block_hash(b));
+    const crypto::hash genesis_hash = get_block_hash(b);
+    m_blockchain.push_back(genesis_hash);
+    sync_genesis_block(genesis_hash, b.miner_tx, m_tree_sync);
     short_chain_history.clear();
     get_short_chain_history(short_chain_history);
     fast_refresh(stop_height, blocks_start_height, short_chain_history, true);
@@ -4396,6 +4525,16 @@ void wallet2::handle_reorg(uint64_t height, std::map<std::pair<uint64_t, uint64_t>, size_t>
+  // THROW_WALLET_EXCEPTION_IF(m_blockchain.size() > dbd.original_chain_size, error::wallet_internal_error,
+  //   "Chain got larger after reorg");
+  // uint64_t n_blocks_detached = dbd.original_chain_size - m_blockchain.size();
+  // while (n_blocks_detached > 0)
+  // {
+  //   THROW_WALLET_EXCEPTION_IF(!m_tree_sync.pop_block(), error::wallet_internal_error, "Failed to pop block from tree");
+  //   --n_blocks_detached;
+  // }
+
   if (m_callback)
     m_callback->on_reorg(height, dbd.detached_blockchain.size(), dbd.detached_tx_hashes.size());
 }
@@ -4433,6 +4572,7 @@ bool wallet2::clear()
   m_pool_info_query_time = 0;
   m_skip_to_height = 0;
   m_background_sync_data = background_sync_data_t{};
+  m_tree_sync.clear();
   return true;
 }
 //----------------------------------------------------------------------------------------------------
@@ -4455,8 +4595,10 @@ void wallet2::clear_soft(bool keep_key_images)
 
   cryptonote::block b;
   generate_genesis(b);
-  m_blockchain.push_back(get_block_hash(b));
+  const crypto::hash genesis_hash = get_block_hash(b);
+  m_blockchain.push_back(genesis_hash);
   m_last_block_reward = cryptonote::get_outs_money_amount(b.miner_tx);
+  sync_genesis_block(genesis_hash, b.miner_tx, m_tree_sync);
 }
 //----------------------------------------------------------------------------------------------------
 void wallet2::clear_user_data()
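The handle_reorg hunk above leaves the tree-side unwind as commented-out TODO code. For clarity, this is the shape that unwind takes once enabled: pop one cached block at a time until the tree matches the post-reorg chain. A sketch of the commented logic (not code this patch enables), assuming dbd, m_blockchain, and m_tree_sync as used in handle_reorg:

    // Unwind the tree after blocks were detached in a reorg.
    // pop_block() reverts the most recently cached block's tree growth.
    uint64_t n_blocks_detached = dbd.original_chain_size - m_blockchain.size();
    while (n_blocks_detached > 0)
    {
        if (!m_tree_sync.pop_block())
            throw std::runtime_error("Failed to pop block from tree");
        --n_blocks_detached;
    }

This mirrors why TreeSyncMemory caches the last m_max_reorg_depth blocks: each cached block carries enough state to revert its tree extension.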
@@ -5443,8 +5585,10 @@ void wallet2::setup_new_blockchain()
 {
   cryptonote::block b;
   generate_genesis(b);
-  m_blockchain.push_back(get_block_hash(b));
+  const crypto::hash genesis_hash = get_block_hash(b);
+  m_blockchain.push_back(genesis_hash);
   m_last_block_reward = cryptonote::get_outs_money_amount(b.miner_tx);
+  sync_genesis_block(genesis_hash, b.miner_tx, m_tree_sync);
   add_subaddress_account(tr("Primary account"));
 }
@@ -6489,6 +6633,37 @@ void wallet2::load(const std::string& wallet_, const epee::wipeable_string& pass
   generate_genesis(genesis);
   crypto::hash genesis_hash = get_block_hash(genesis);
 
+  if (m_tree_sync.get_output_count() == 0)
+  {
+    sync_genesis_block(genesis_hash, genesis.miner_tx, m_tree_sync);
+
+    // clear the m_blockchain so that we re-sync the tree and get received output paths
+    m_blockchain.clear();
+  }
+  else
+  {
+    const auto tree_root = epee::string_tools::pod_to_hex(m_tree_sync.get_tree_root());
+    MINFO("Tree root: " << tree_root);
+    const auto n_leaf_tuples = m_tree_sync.get_n_leaf_tuples();
+    MINFO("N leaf tuples: " << n_leaf_tuples);
+
+    for (size_t i = 0; i < m_transfers.size(); ++i)
+    {
+      const auto &td = m_transfers[i];
+      const auto pubkey = td.get_public_key();
+      const auto commitment = td.is_rct() ? rct::commit(td.amount(), td.m_mask) : rct::zeroCommitVartime(td.amount());
+
+      const fcmp_pp::curve_trees::OutputPair output_pair{ .output_pubkey = pubkey, .commitment = commitment };
+      fcmp_pp::curve_trees::CurveTrees<fcmp_pp::curve_trees::Helios, fcmp_pp::curve_trees::Selene>::Path path;
+      CHECK_AND_ASSERT_THROW_MES(m_tree_sync.get_output_path(output_pair, path), "did not find output path");
+
+      const bool audit = fcmp_pp::curve_trees::curve_trees_v1()->audit_path(path, output_pair, n_leaf_tuples);
+      CHECK_AND_ASSERT_THROW_MES(audit, "path failed audit");
+
+      printf("Output %zu passed audit...\n", i);
+    }
+  }
+
   if (m_blockchain.empty())
   {
     m_blockchain.push_back(genesis_hash);
@@ -14186,6 +14361,7 @@ void wallet2::import_blockchain(const std::tuple<size_t, crypto::hash, std::vector<crypto::hash>>
 std::pair<uint64_t, std::vector<tools::wallet2::exported_transfer_details>> wallet2::export_outputs(bool all, uint32_t start, uint32_t count) const
diff --git a/src/wallet/wallet2.h b/src/wallet/wallet2.h
index 26ed2d8c004..da2d9217126 100644
--- a/src/wallet/wallet2.h
+++ b/src/wallet/wallet2.h
@@ -1956,6 +1956,9 @@ namespace tools
     const std::vector<std::vector<rct::key>> *m_multisig_rescan_k;
     std::unordered_map<crypto::public_key, crypto::key_image> m_cold_key_images;
 
+    uint64_t m_sync_blocks_time_ms;
+    uint64_t m_outs_by_unlock_time_ms;
+
     std::atomic<bool> m_run;
 
     boost::recursive_mutex m_daemon_rpc_mutex;